From 2f6c77337e6329f17590ef758fb9f811968e9378 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 21 Mar 2018 06:26:38 -0400 Subject: [PATCH 01/27] Remove 6.1.5 version constant The assumption here is that we will no longer be making a release from the 6.1 branch. Since we assume that all versions on this branch are actually released, we do not want to leave behind any versions that would require a snapshot build. We do have a test that verifies that all released versions are present here, so if another release is performed from the 6.1 branch, that test will fail and we will know to add the version constant at that time. --- server/src/main/java/org/elasticsearch/Version.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 799e1ee99b31a..0f6e79f26ec39 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -149,8 +149,6 @@ public class Version implements Comparable { public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_1_4_ID = 6010499; public static final Version V_6_1_4 = new Version(V_6_1_4_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); - public static final int V_6_1_5_ID = 6010599; - public static final Version V_6_1_5 = new Version(V_6_1_5_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_2_0_ID = 6020099; public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_6_2_1_ID = 6020199; @@ -193,8 +191,6 @@ public static Version fromId(int id) { return V_6_2_1; case V_6_2_0_ID: return V_6_2_0; - case V_6_1_5_ID: - return V_6_1_5; case V_6_1_4_ID: return V_6_1_4; case V_6_1_3_ID: From 93ff973afc0ecfaa8a4d587046c8642953f77bb0 Mon Sep 17 00:00:00 2001 From: markharwood Date: Wed, 21 Mar 2018 10:42:14 +0000 Subject: [PATCH 02/27] Tests - fix incorrect test assumption that zero-doc buckets will be returned by the adjacency matrix aggregation. Closes #29159 (#29167) --- .../bucket/adjacency/InternalAdjacencyMatrixTests.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java index 742e769ed4082..2ba97251b313c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java @@ -82,8 +82,10 @@ protected void assertReduced(InternalAdjacencyMatrix reduced, List expectedCounts = new TreeMap<>(); for (InternalAdjacencyMatrix input : inputs) { for (InternalAdjacencyMatrix.InternalBucket bucket : input.getBuckets()) { - expectedCounts.compute(bucket.getKeyAsString(), + if (bucket.getDocCount() > 0) { + expectedCounts.compute(bucket.getKeyAsString(), (key, oldValue) -> (oldValue == null ? 
0 : oldValue) + bucket.getDocCount()); + } } } final Map actualCounts = new TreeMap<>(); From ad7e8bab6f51a49d6ea8f465d35d1ea55c048082 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 21 Mar 2018 08:55:12 -0400 Subject: [PATCH 03/27] Revive build Javadocs on JDK 10 and workaround bug (#29173) This commit reenables the Javadoc tasks on JDK 10. To reenable these tasks, we have to workaround a bug in JDK 10 which trips on some deeply nested anonymous classes that we have in the codebase (and are fine as-is, this is not a problem with this code). The workaround is to remove the compiled classes from the classpath. This has been reported upstream and the workaround was suggested there (see the code comment). --- .../org/elasticsearch/gradle/BuildPlugin.groovy | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 6043ce210906a..5eb82c12616fc 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -475,14 +475,18 @@ class BuildPlugin implements Plugin { } static void configureJavadoc(Project project) { - project.tasks.withType(Javadoc) { - executable = new File(project.compilerJavaHome, 'bin/javadoc') + // remove compiled classes from the Javadoc classpath: http://mail.openjdk.java.net/pipermail/javadoc-dev/2018-January/000400.html + final List classes = new ArrayList<>() + project.tasks.withType(JavaCompile) { javaCompile -> + classes.add(javaCompile.destinationDir) } - configureJavadocJar(project) - if (project.compilerJavaVersion == JavaVersion.VERSION_1_10) { - project.tasks.withType(Javadoc) { it.enabled = false } - project.tasks.getByName('javadocJar').each { it.enabled = false } + project.tasks.withType(Javadoc) { javadoc -> + javadoc.executable = new File(project.compilerJavaHome, 'bin/javadoc') + javadoc.classpath = javadoc.getClasspath().filter { f -> + return classes.contains(f) == false + } } + configureJavadocJar(project) } /** Adds a javadocJar task to generate a jar containing javadocs. 
*/ From f8830b7b43e51935134817269dcebe4766338ace Mon Sep 17 00:00:00 2001 From: David Roberts Date: Wed, 21 Mar 2018 11:16:22 +0000 Subject: [PATCH 04/27] [TEST] Mute index synced flush rest tests Awaiting fix of #29162 --- .../resources/rest-api-spec/test/cat.shards/10_basic.yml | 4 ++++ .../resources/rest-api-spec/test/indices.flush/10_basic.yml | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 53b9b741bdc61..4d63b4fd7c797 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -80,6 +80,10 @@ $/ --- "Test cat shards output": + - skip: + # The BWC logic for released 6.0 and 6.1 versions is wrong + version: "all" + reason: "AwaitsFix'ing, see elasticsearch #29162" - do: cat.shards: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml index 659435ae19615..70d284c920066 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml @@ -1,5 +1,10 @@ --- "Index synced flush rest test": + - skip: + # The BWC logic for released 6.0 and 6.1 versions is wrong + version: "all" + reason: "AwaitsFix'ing, see elasticsearch #29162" + - do: indices.create: index: testing From 7d44d7577441c8cfd6b321e375b661cfe89053ca Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 21 Mar 2018 15:38:01 -0400 Subject: [PATCH 05/27] Adjust PreSyncedFlushResponse bwc versions We discussed and agreed to include the synced-flush change in 6.3.0+ but not in 5.6.9. We will re-evaluate the urgency and importance of the issue and then decide which versions the change should be included in. --- .../elasticsearch/indices/flush/SyncedFlushService.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index b8b294a90d422..fb572d015ed43 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -561,11 +561,14 @@ static final class PreSyncedFlushResponse extends TransportResponse { } boolean includeNumDocs(Version version) { - return version.onOrAfter(Version.V_5_6_8); + if (version.major == Version.V_5_6_8.major) { + return version.onOrAfter(Version.V_5_6_8); + } + return version.onOrAfter(Version.V_6_2_2); } boolean includeExistingSyncId(Version version) { - return version.onOrAfter(Version.V_5_6_9); + return version.onOrAfter(Version.V_6_3_0); } @Override From 8cfe619f0318a33406f14f58069be38e26f9ed86 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 21 Mar 2018 16:51:52 -0400 Subject: [PATCH 06/27] Revert "[TEST] Mute index synced flush rest tests" The BWC issue was fixed. This reverts commit f8830b7b43e51935134817269dcebe4766338ace.
--- .../resources/rest-api-spec/test/cat.shards/10_basic.yml | 4 ---- .../resources/rest-api-spec/test/indices.flush/10_basic.yml | 5 ----- 2 files changed, 9 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 4d63b4fd7c797..53b9b741bdc61 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -80,10 +80,6 @@ $/ --- "Test cat shards output": - - skip: - # The BWC logic for released 6.0 and 6.1 versions is wrong - version: "all" - reason: "AwaitsFix'ing, see elasticsearch #29162" - do: cat.shards: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml index 70d284c920066..659435ae19615 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml @@ -1,10 +1,5 @@ --- "Index synced flush rest test": - - skip: - # The BWC logic for released 6.0 and 6.1 versions is wrong - version: "all" - reason: "AwaitsFix'ing, see elasticsearch #29162" - - do: indices.create: index: testing From edf27a599e178a13bbf2256676d27d65756c653e Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 22 Mar 2018 09:18:07 +0100 Subject: [PATCH 07/27] Add new setting to disable persistent tasks allocations (#29137) This commit adds a new setting `cluster.persistent_tasks.allocation.enable` that can be used to enable or disable the allocation of persistent tasks. The setting accepts the values `all` (default) or `none`. 
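To make the behavior concrete, disabling assignments could look like this through the cluster settings API (an illustrative sketch; the request mirrors the documentation added by this patch):

```
PUT /_cluster/settings
{
  "persistent": {
    "cluster.persistent_tasks.allocation.enable": "none"
  }
}
```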
When set to none, the persistent tasks that are created (or that must be reassigned) won't be assigned to a node but will reside in the cluster state with no "executor node" and a reason describing why it is not assigned: ``` "assignment" : { "executor_node" : null, "explanation" : "persistent task [foo/bar] cannot be assigned [no persistent task assignments are allowed due to cluster settings]" } ``` --- docs/reference/modules/cluster/misc.asciidoc | 26 +++ .../common/settings/ClusterSettings.java | 4 +- .../PersistentTasksClusterService.java | 20 +- .../decider/AssignmentDecision.java | 72 ++++++++ .../decider/EnableAssignmentDecider.java | 101 ++++++++++ .../persistent/package-info.java | 2 +- .../PersistentTasksClusterServiceTests.java | 90 ++++++++- .../PersistentTasksDecidersTestCase.java | 134 ++++++++++++++ .../decider/AssignmentDecisionTests.java | 33 ++++ .../decider/EnableAssignmentDeciderIT.java | 173 ++++++++++++++++++ .../decider/EnableAssignmentDeciderTests.java | 52 ++++++ 11 files changed, 699 insertions(+), 8 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/persistent/decider/AssignmentDecision.java create mode 100644 server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java create mode 100644 server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java create mode 100644 server/src/test/java/org/elasticsearch/persistent/decider/AssignmentDecisionTests.java create mode 100644 server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java create mode 100644 server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderTests.java diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 3963312c0f4ea..4edcd34e00f5d 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -56,3 +56,29 @@ PUT /_cluster/settings } ------------------------------- // CONSOLE + + +[[persistent-tasks-allocation]] ==== Persistent Tasks Allocations + +Plugins can create a kind of task called a persistent task. Those tasks are +usually long-lived tasks and are stored in the cluster state, allowing the +tasks to be revived after a full cluster restart. + +Every time a persistent task is created, the master node takes care of +assigning the task to a node of the cluster, and the assigned node will then +pick up the task and execute it locally. The process of assigning persistent +tasks to nodes is controlled by the following property, which can be updated +dynamically: + +`cluster.persistent_tasks.allocation.enable`:: + +-- +Enable or disable allocation for persistent tasks: + +* `all` - (default) Allows persistent tasks to be assigned to nodes +* `none` - No allocations are allowed for any type of persistent task + +This setting does not affect the persistent tasks that are already being executed. +Only newly created persistent tasks, or tasks that must be reassigned (after a node +left the cluster, for example), are impacted by this setting. 
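Because the setting is dynamic, it can also be cleared back to its default of `all` by sending a null value, which is what the integration test below does via `putNull` (a sketch under the same assumptions as the example above):

```
PUT /_cluster/settings
{
  "persistent": {
    "cluster.persistent_tasks.allocation.enable": null
  }
}
```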
diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 804340d63ed11..bcfed3388e9f2 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -79,6 +79,7 @@ import org.elasticsearch.monitor.os.OsService; import org.elasticsearch.monitor.process.ProcessService; import org.elasticsearch.node.Node; +import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.BaseRestHandler; @@ -420,6 +421,7 @@ public void apply(Settings value, Settings current, Settings previous) { FastVectorHighlighter.SETTING_TV_HIGHLIGHT_MULTI_VALUE, Node.BREAKER_TYPE_KEY, OperationRouting.USE_ADAPTIVE_REPLICA_SELECTION_SETTING, - IndexGraveyard.SETTING_MAX_TOMBSTONES + IndexGraveyard.SETTING_MAX_TOMBSTONES, + EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING ))); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index 9e064c3d20924..cf44556ee5ddc 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -34,6 +34,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; +import org.elasticsearch.persistent.decider.AssignmentDecision; +import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.tasks.Task; import java.util.Objects; @@ -45,12 +47,14 @@ public class PersistentTasksClusterService extends AbstractComponent implements private final ClusterService clusterService; private final PersistentTasksExecutorRegistry registry; + private final EnableAssignmentDecider decider; public PersistentTasksClusterService(Settings settings, PersistentTasksExecutorRegistry registry, ClusterService clusterService) { super(settings); this.clusterService = clusterService; clusterService.addListener(this); this.registry = registry; + this.decider = new EnableAssignmentDecider(settings, clusterService.getClusterSettings()); } /** @@ -224,6 +228,12 @@ private Assignment createAssignment(final final @Nullable Params taskParams, final ClusterState currentState) { PersistentTasksExecutor persistentTasksExecutor = registry.getPersistentTaskExecutorSafe(taskName); + + AssignmentDecision decision = decider.canAssign(); + if (decision.getType() == AssignmentDecision.Type.NO) { + return new Assignment(null, "persistent task [" + taskName + "] cannot be assigned [" + decision.getReason() + "]"); + } + return persistentTasksExecutor.getAssignment(taskParams, currentState); } @@ -249,7 +259,8 @@ public void onFailure(String source, Exception e) { /** * Returns true if the cluster state change(s) require to reassign some persistent tasks. It can happen in the following - * situations: a node left or is added, the routing table changed, the master node changed or the persistent tasks changed. 
+ * situations: a node left or is added, the routing table changed, the master node changed, the metadata changed or the + * persistent tasks changed. */ boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) { final PersistentTasksCustomMetaData tasks = event.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); @@ -259,7 +270,12 @@ boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) { boolean masterChanged = event.previousState().nodes().isLocalNodeElectedMaster() == false; - if (persistentTasksChanged(event) || event.nodesChanged() || event.routingTableChanged() || masterChanged) { + if (persistentTasksChanged(event) + || event.nodesChanged() + || event.routingTableChanged() + || event.metaDataChanged() + || masterChanged) { + for (PersistentTask task : tasks.tasks()) { if (needsReassignment(task.getAssignment(), event.state().nodes())) { Assignment assignment = createAssignment(task.getTaskName(), task.getParams(), event.state()); diff --git a/server/src/main/java/org/elasticsearch/persistent/decider/AssignmentDecision.java b/server/src/main/java/org/elasticsearch/persistent/decider/AssignmentDecision.java new file mode 100644 index 0000000000000..eb8f851a68dab --- /dev/null +++ b/server/src/main/java/org/elasticsearch/persistent/decider/AssignmentDecision.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent.decider; + +import java.util.Locale; +import java.util.Objects; + +/** + * {@link AssignmentDecision} represents the decision made during the process of + * assigning a persistent task to a node of the cluster. 
+ * + * @see EnableAssignmentDecider + */ +public final class AssignmentDecision { + + public static final AssignmentDecision YES = new AssignmentDecision(Type.YES, ""); + + private final Type type; + private final String reason; + + public AssignmentDecision(final Type type, final String reason) { + this.type = Objects.requireNonNull(type); + this.reason = Objects.requireNonNull(reason); + } + + public Type getType() { + return type; + } + + public String getReason() { + return reason; + } + + @Override + public String toString() { + return "assignment decision [type=" + type + ", reason=" + reason + "]"; + } + + public enum Type { + NO(0), YES(1); + + private final int id; + + Type(int id) { + this.id = id; + } + + public int getId() { + return id; + } + + public static Type resolve(final String s) { + return Type.valueOf(s.toUpperCase(Locale.ROOT)); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java b/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java new file mode 100644 index 0000000000000..525e1379a4098 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; + +import java.util.Locale; + +import static org.elasticsearch.common.settings.Setting.Property.Dynamic; +import static org.elasticsearch.common.settings.Setting.Property.NodeScope; + +/** + * {@link EnableAssignmentDecider} is used to allow/disallow the persistent tasks + * to be assigned to cluster nodes. + *
<p> + * Allocation settings can have the following values (case-insensitive): + * <ul> + *     <li><code>NONE</code> - no persistent tasks can be assigned + *     <li><code>ALL</code> - all persistent tasks can be assigned to nodes + * </ul>
+ * + * @see Allocation + */ +public class EnableAssignmentDecider { + + public static final Setting CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING = + new Setting<>("cluster.persistent_tasks.allocation.enable", Allocation.ALL.toString(), Allocation::fromString, Dynamic, NodeScope); + + private volatile Allocation enableAssignment; + + public EnableAssignmentDecider(final Settings settings, final ClusterSettings clusterSettings) { + this.enableAssignment = CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, this::setEnableAssignment); + } + + public void setEnableAssignment(final Allocation enableAssignment) { + this.enableAssignment = enableAssignment; + } + + /** + * Returns a {@link AssignmentDecision} whether the given persistent task can be assigned + * to a node of the cluster. The decision depends on the current value of the setting + * {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING}. + * + * @return the {@link AssignmentDecision} + */ + public AssignmentDecision canAssign() { + if (enableAssignment == Allocation.NONE) { + return new AssignmentDecision(AssignmentDecision.Type.NO, "no persistent task assignments are allowed due to cluster settings"); + } + return AssignmentDecision.YES; + } + + /** + * Allocation values or rather their string representation to be used used with + * {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING} + * via cluster settings. + */ + public enum Allocation { + + NONE, + ALL; + + public static Allocation fromString(final String strValue) { + if (strValue == null) { + return null; + } else { + String value = strValue.toUpperCase(Locale.ROOT); + try { + return valueOf(value); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Illegal value [" + value + "] for [" + + CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey() + "]"); + } + } + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/persistent/package-info.java b/server/src/main/java/org/elasticsearch/persistent/package-info.java index f948e3ace448e..3e71716e60643 100644 --- a/server/src/main/java/org/elasticsearch/persistent/package-info.java +++ b/server/src/main/java/org/elasticsearch/persistent/package-info.java @@ -30,7 +30,7 @@ * task. *

* 2. The master node updates the {@link org.elasticsearch.persistent.PersistentTasksCustomMetaData} in the cluster state to indicate - * that there is a new persistent task is running in the system. + * that there is a new persistent task running in the system. *

* 3. The {@link org.elasticsearch.persistent.PersistentTasksNodeService} running on every node in the cluster monitors changes in * the cluster state and starts execution of all new tasks assigned to the node it is running on. diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java index e470c5028aa8f..916fdee213695 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java @@ -36,9 +36,16 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; +import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Arrays; @@ -52,14 +59,41 @@ import static org.elasticsearch.persistent.PersistentTasksClusterService.needsReassignment; import static org.elasticsearch.persistent.PersistentTasksClusterService.persistentTasksChanged; import static org.elasticsearch.persistent.PersistentTasksExecutor.NO_NODE_FOUND; +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Mockito.mock; public class PersistentTasksClusterServiceTests extends ESTestCase { + /** Needed by {@link ClusterService} **/ + private static ThreadPool threadPool; + /** Needed by {@link PersistentTasksClusterService} **/ + private ClusterService clusterService; + + @BeforeClass + public static void setUpThreadPool() { + threadPool = new TestThreadPool(PersistentTasksClusterServiceTests.class.getSimpleName()); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + clusterService = createClusterService(threadPool); + } + + @AfterClass + public static void tearDownThreadPool() throws Exception { + terminate(threadPool); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + } + public void testReassignmentRequired() { final PersistentTasksClusterService service = createService((params, clusterState) -> "never_assign".equals(((TestParams) params).getTestParam()) ? 
NO_NODE_FOUND : randomNodeAssignment(clusterState.nodes()) @@ -81,6 +115,55 @@ public void testReassignmentRequired() { } } + public void testReassignmentRequiredOnMetadataChanges() { + EnableAssignmentDecider.Allocation allocation = randomFrom(EnableAssignmentDecider.Allocation.values()); + + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("_node", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("_node") + .masterNodeId("_node") + .build(); + + boolean unassigned = randomBoolean(); + PersistentTasksCustomMetaData tasks = PersistentTasksCustomMetaData.builder() + .addTask("_task_1", TestPersistentTasksExecutor.NAME, null, new Assignment(unassigned ? null : "_node", "_reason")) + .build(); + + MetaData metaData = MetaData.builder() + .putCustom(PersistentTasksCustomMetaData.TYPE, tasks) + .persistentSettings(Settings.builder() + .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), allocation.toString()) + .build()) + .build(); + + ClusterState previous = ClusterState.builder(new ClusterName("_name")) + .nodes(nodes) + .metaData(metaData) + .build(); + + ClusterState current; + + final boolean changed = randomBoolean(); + if (changed) { + allocation = randomValueOtherThan(allocation, () -> randomFrom(EnableAssignmentDecider.Allocation.values())); + + current = ClusterState.builder(previous) + .metaData(MetaData.builder(previous.metaData()) + .persistentSettings(Settings.builder() + .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), allocation.toString()) + .build()) + .build()) + .build(); + } else { + current = ClusterState.builder(previous).build(); + } + + final ClusterChangedEvent event = new ClusterChangedEvent("test", current, previous); + + final PersistentTasksClusterService service = createService((params, clusterState) -> randomNodeAssignment(clusterState.nodes())); + assertThat(dumpEvent(event), service.shouldReassignPersistentTasks(event), equalTo(changed && unassigned)); + } + public void testReassignTasksWithNoTasks() { ClusterState clusterState = initialState(); assertThat(reassign(clusterState).metaData().custom(PersistentTasksCustomMetaData.TYPE), nullValue()); @@ -527,7 +610,6 @@ private DiscoveryNode newNode(String nodeId) { Version.CURRENT); } - private ClusterState initialState() { MetaData.Builder metaData = MetaData.builder(); RoutingTable.Builder routingTable = RoutingTable.builder(); @@ -558,7 +640,7 @@ private void changeRoutingTable(MetaData.Builder metaData, RoutingTable.Builder } /** Creates a PersistentTasksClusterService with a single PersistentTasksExecutor implemented by a BiFunction **/ - static
<P extends PersistentTaskParams> PersistentTasksClusterService createService(final BiFunction<P, ClusterState, Assignment> fn) { + private <P extends PersistentTaskParams> PersistentTasksClusterService createService(final BiFunction<P, ClusterState, Assignment> fn) { PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, singleton(new PersistentTasksExecutor<P>
(Settings.EMPTY, TestPersistentTasksExecutor.NAME, null) { @Override @@ -571,6 +653,6 @@ protected void nodeOperation(AllocatedPersistentTask task, P params, Task.Status throw new UnsupportedOperationException(); } })); - return new PersistentTasksClusterService(Settings.EMPTY, registry, mock(ClusterService.class)); + return new PersistentTasksClusterService(Settings.EMPTY, registry, clusterService); } } diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java new file mode 100644 index 0000000000000..356e518198c52 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.function.Predicate; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; + +public abstract class PersistentTasksDecidersTestCase extends ESTestCase { + + /** Needed by {@link ClusterService} **/ + private static ThreadPool threadPool; + /** Needed by {@link PersistentTasksClusterService} **/ + private ClusterService clusterService; + + private PersistentTasksClusterService persistentTasksClusterService; + + @BeforeClass + public static void setUpThreadPool() { + threadPool = new TestThreadPool(getTestClass().getSimpleName()); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + clusterService = createClusterService(threadPool); + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(clusterService.getSettings(), emptyList()) { + @Override + public PersistentTasksExecutor getPersistentTaskExecutorSafe(String taskName) { + return new PersistentTasksExecutor(clusterService.getSettings(), taskName, null) { + @Override + protected void nodeOperation(AllocatedPersistentTask task, Params params, Task.Status status) { + 
logger.debug("Executing task {}", task); + } + }; + } + }; + persistentTasksClusterService = new PersistentTasksClusterService(clusterService.getSettings(), registry, clusterService); + } + + @AfterClass + public static void tearDownThreadPool() throws Exception { + terminate(threadPool); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + } + + protected ClusterState reassign(final ClusterState clusterState) { + return persistentTasksClusterService.reassignTasks(clusterState); + } + + protected void updateSettings(final Settings settings) { + ClusterSettings clusterSettings = clusterService.getClusterSettings(); + Settings.Builder updated = Settings.builder(); + clusterSettings.updateDynamicSettings(settings, updated, Settings.builder(), getTestClass().getName()); + clusterSettings.applySettings(updated.build()); + } + + protected static ClusterState createClusterStateWithTasks(final int nbNodes, final int nbTasks) { + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + for (int i = 0; i < nbNodes; i++) { + nodes.add(new DiscoveryNode("_node_" + i, buildNewFakeTransportAddress(), Version.CURRENT)); + } + + PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder(); + for (int i = 0; i < nbTasks; i++) { + tasks.addTask("_task_" + i, "test", null, new PersistentTasksCustomMetaData.Assignment(null, "initialized")); + } + + MetaData metaData = MetaData.builder() + .putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build()) + .build(); + + return ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).metaData(metaData).build(); + } + + /** Asserts that the given cluster state contains nbTasks tasks that are assigned **/ + protected static void assertNbAssignedTasks(final long nbTasks, final ClusterState clusterState) { + assertPersistentTasks(nbTasks, clusterState, PersistentTasksCustomMetaData.PersistentTask::isAssigned); + } + + /** Asserts that the given cluster state contains nbTasks tasks that are NOT assigned **/ + protected static void assertNbUnassignedTasks(final long nbTasks, final ClusterState clusterState) { + assertPersistentTasks(nbTasks, clusterState, task -> task.isAssigned() == false); + } + + /** Asserts that the cluster state contains nbTasks tasks that verify the given predicate **/ + protected static void assertPersistentTasks(final long nbTasks, + final ClusterState clusterState, + final Predicate predicate) { + PersistentTasksCustomMetaData tasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE); + assertNotNull("Persistent tasks must be not null", tasks); + assertEquals(nbTasks, tasks.tasks().stream().filter(predicate).count()); + } +} diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/AssignmentDecisionTests.java b/server/src/test/java/org/elasticsearch/persistent/decider/AssignmentDecisionTests.java new file mode 100644 index 0000000000000..3fa580e726a83 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/decider/AssignmentDecisionTests.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.test.ESTestCase; + +public class AssignmentDecisionTests extends ESTestCase { + + public void testConstantsTypes() { + assertEquals(AssignmentDecision.Type.YES, AssignmentDecision.YES.getType()); + } + + public void testResolveFromType() { + final AssignmentDecision.Type expected = randomFrom(AssignmentDecision.Type.values()); + assertEquals(expected, AssignmentDecision.Type.resolve(expected.toString())); + } +} diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java new file mode 100644 index 0000000000000..15d12fb1ce932 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.persistent.TestPersistentTasksPlugin; +import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; +import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; +import java.util.concurrent.CountDownLatch; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.persistent.decider.EnableAssignmentDecider.Allocation; +import static org.elasticsearch.persistent.decider.EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(minNumDataNodes = 1) +public class EnableAssignmentDeciderIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return singletonList(TestPersistentTasksPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return nodePlugins(); + } + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + /** + * Test that the {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING} setting correctly + * prevents persistent tasks to be assigned after a cluster restart. 
+ */ + public void testEnableAssignmentAfterRestart() throws Exception { + final int numberOfTasks = randomIntBetween(1, 10); + logger.trace("creating {} persistent tasks", numberOfTasks); + + final CountDownLatch latch = new CountDownLatch(numberOfTasks); + for (int i = 0; i < numberOfTasks; i++) { + PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class); + service.startPersistentTask("task_" + i, TestPersistentTasksExecutor.NAME, randomTaskParams(), + new ActionListener>() { + @Override + public void onResponse(PersistentTask task) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + latch.countDown(); + } + }); + } + latch.await(); + + ClusterService clusterService = internalCluster().clusterService(internalCluster().getMasterName()); + PersistentTasksCustomMetaData tasks = clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertEquals(numberOfTasks, tasks.tasks().stream().filter(t -> TestPersistentTasksExecutor.NAME.equals(t.getTaskName())).count()); + + logger.trace("waiting for the tasks to be running"); + assertBusy(() -> { + ListTasksResponse listTasks = client().admin().cluster().prepareListTasks() + .setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get(); + assertThat(listTasks.getTasks().size(), equalTo(numberOfTasks)); + }); + + try { + logger.trace("disable persistent tasks assignment"); + disablePersistentTasksAssignment(); + + logger.trace("restart the cluster"); + internalCluster().fullRestart(); + ensureYellow(); + + logger.trace("persistent tasks assignment is still disabled"); + assertEnableAssignmentSetting(Allocation.NONE); + + logger.trace("persistent tasks are not assigned"); + tasks = internalCluster().clusterService().state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertEquals(numberOfTasks, tasks.tasks().stream() + .filter(t -> TestPersistentTasksExecutor.NAME.equals(t.getTaskName())) + .filter(t -> t.isAssigned() == false) + .count()); + + ListTasksResponse runningTasks = client().admin().cluster().prepareListTasks() + .setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get(); + assertThat(runningTasks.getTasks().size(), equalTo(0)); + + logger.trace("enable persistent tasks assignment"); + if (randomBoolean()) { + enablePersistentTasksAssignment(); + } else { + resetPersistentTasksAssignment(); + } + + assertBusy(() -> { + ListTasksResponse listTasks = client().admin().cluster().prepareListTasks() + .setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get(); + assertThat(listTasks.getTasks().size(), equalTo(numberOfTasks)); + }); + + } finally { + resetPersistentTasksAssignment(); + } + } + + private void assertEnableAssignmentSetting(final Allocation expected) { + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).get(); + Settings settings = clusterStateResponse.getState().getMetaData().settings(); + + String value = settings.get(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey()); + assertThat(Allocation.fromString(value), equalTo(expected)); + } + + private void disablePersistentTasksAssignment() { + Settings.Builder settings = Settings.builder().put(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } + + private void enablePersistentTasksAssignment() { + Settings.Builder settings = 
Settings.builder().put(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.ALL); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } + + private void resetPersistentTasksAssignment() { + Settings.Builder settings = Settings.builder().putNull(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey()); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } + + /** Returns a random task parameter **/ + private static PersistentTaskParams randomTaskParams() { + if (randomBoolean()) { + return null; + } + return new TestParams(randomAlphaOfLength(10)); + } +} diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderTests.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderTests.java new file mode 100644 index 0000000000000..7aedde1ab9b60 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderTests.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksDecidersTestCase; + +public class EnableAssignmentDeciderTests extends PersistentTasksDecidersTestCase { + + public void testAllocationValues() { + final String all = randomFrom("all", "All", "ALL"); + assertEquals(EnableAssignmentDecider.Allocation.ALL, EnableAssignmentDecider.Allocation.fromString(all)); + + final String none = randomFrom("none", "None", "NONE"); + assertEquals(EnableAssignmentDecider.Allocation.NONE, EnableAssignmentDecider.Allocation.fromString(none)); + } + + public void testEnableAssignment() { + final int nbTasks = randomIntBetween(1, 10); + final int nbNodes = randomIntBetween(1, 5); + final EnableAssignmentDecider.Allocation allocation = randomFrom(EnableAssignmentDecider.Allocation.values()); + + Settings settings = Settings.builder() + .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), allocation.toString()) + .build(); + updateSettings(settings); + + ClusterState clusterState = reassign(createClusterStateWithTasks(nbNodes, nbTasks)); + if (allocation == EnableAssignmentDecider.Allocation.ALL) { + assertNbAssignedTasks(nbTasks, clusterState); + } else { + assertNbUnassignedTasks(nbTasks, clusterState); + } + } +} From d6d3fb3c7364fc6ab45a64135ded0c880e96a3a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 22 Mar 2018 11:14:24 +0100 Subject: [PATCH 08/27] Use EnumMap in ClusterBlocks (#29112) By using EnumMap instead of an ImmutableLevelHolder array we can avoid the using enum ordinals to index into the array. --- .../cluster/block/ClusterBlocks.java | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index 9e05d50831882..ee4779bc8c514 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.block; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -30,6 +31,7 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -53,7 +55,7 @@ public class ClusterBlocks extends AbstractDiffable { private final ImmutableOpenMap> indicesBlocks; - private final ImmutableLevelHolder[] levelHolders; + private final EnumMap levelHolders; ClusterBlocks(Set global, ImmutableOpenMap> indicesBlocks) { this.global = global; @@ -70,20 +72,20 @@ public ImmutableOpenMap> indices() { } public Set global(ClusterBlockLevel level) { - return levelHolders[level.ordinal()].global(); + return levelHolders.get(level).global(); } public ImmutableOpenMap> indices(ClusterBlockLevel level) { - return levelHolders[level.ordinal()].indices(); + return levelHolders.get(level).indices(); } private Set blocksForIndex(ClusterBlockLevel level, String index) { return indices(level).getOrDefault(index, emptySet()); } - private static ImmutableLevelHolder[] generateLevelHolders(Set global, - ImmutableOpenMap> indicesBlocks) { - 
ImmutableLevelHolder[] levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length]; + private static EnumMap generateLevelHolders(Set global, + ImmutableOpenMap> indicesBlocks) { + EnumMap levelHolders = new EnumMap<>(ClusterBlockLevel.class); for (final ClusterBlockLevel level : ClusterBlockLevel.values()) { Predicate containsLevel = block -> block.contains(level); Set newGlobal = unmodifiableSet(global.stream() @@ -96,8 +98,7 @@ private static ImmutableLevelHolder[] generateLevelHolders(Set glo .filter(containsLevel) .collect(toSet()))); } - - levelHolders[level.ordinal()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build()); + levelHolders.put(level, new ImmutableLevelHolder(newGlobal, indicesBuilder.build())); } return levelHolders; } From e4b30071bb8243be96baa744e8ebbe61057b1474 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 22 Mar 2018 11:58:55 +0100 Subject: [PATCH 09/27] RankEvalRequest should implement IndicesRequest (#29188) Change RankEvalRequest to implement IndicesRequest, so it gets treated in a similar fashion to regular search requests e.g. by security. --- .../org/elasticsearch/client/Request.java | 2 +- .../index/rankeval/RankEvalRequest.java | 78 +++++++++++++---- .../index/rankeval/RestRankEvalAction.java | 2 +- .../rankeval/TransportRankEvalAction.java | 6 +- .../index/rankeval/RankEvalRequestIT.java | 19 ++--- .../index/rankeval/RankEvalRequestTests.java | 83 +++++++++++++++++++ .../index/rankeval/RankEvalSpecTests.java | 7 +- .../rankeval/SmokeMultipleTemplatesIT.java | 2 +- 8 files changed, 162 insertions(+), 37 deletions(-) create mode 100644 modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index 66b34da777b6a..0d8057060efeb 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -531,7 +531,7 @@ static Request existsAlias(GetAliasesRequest getAliasesRequest) { } static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException { - String endpoint = endpoint(rankEvalRequest.getIndices(), Strings.EMPTY_ARRAY, "_rank_eval"); + String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval"); HttpEntity entity = createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE); return new Request(HttpGet.METHOD_NAME, endpoint, Collections.emptyMap(), entity); } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java index 58fd3b0a694ae..7d3ec94811c5a 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java @@ -22,24 +22,47 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
java.io.IOException; +import java.util.Arrays; import java.util.Objects; /** * Request to perform a search ranking evaluation. */ -public class RankEvalRequest extends ActionRequest { +public class RankEvalRequest extends ActionRequest implements IndicesRequest.Replaceable { private RankEvalSpec rankingEvaluationSpec; + + private IndicesOptions indicesOptions = SearchRequest.DEFAULT_INDICES_OPTIONS; private String[] indices = Strings.EMPTY_ARRAY; public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) { - this.rankingEvaluationSpec = rankingEvaluationSpec; - setIndices(indices); + this.rankingEvaluationSpec = Objects.requireNonNull(rankingEvaluationSpec, "ranking evaluation specification must not be null"); + indices(indices); + } + + RankEvalRequest(StreamInput in) throws IOException { + super.readFrom(in); + rankingEvaluationSpec = new RankEvalSpec(in); + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } else { + // readStringArray uses readVInt for size, we used readInt in 6.2 + int indicesSize = in.readInt(); + String[] indices = new String[indicesSize]; + for (int i = 0; i < indicesSize; i++) { + indices[i] = in.readString(); + } + // no indices options yet + } } RankEvalRequest() { @@ -72,7 +95,8 @@ public void setRankEvalSpec(RankEvalSpec task) { /** * Sets the indices the search will be executed on. */ - public RankEvalRequest setIndices(String... indices) { + @Override + public RankEvalRequest indices(String... indices) { Objects.requireNonNull(indices, "indices must not be null"); for (String index : indices) { Objects.requireNonNull(index, "index must not be null"); @@ -84,24 +108,23 @@ public RankEvalRequest setIndices(String... 
indices) { /** * @return the indices for this request */ - public String[] getIndices() { + @Override + public String[] indices() { return indices; } + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public void indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + } + @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - rankingEvaluationSpec = new RankEvalSpec(in); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - indices = in.readStringArray(); - } else { - // readStringArray uses readVInt for size, we used readInt in 6.2 - int indicesSize = in.readInt(); - String[] indices = new String[indicesSize]; - for (int i = 0; i < indicesSize; i++) { - indices[i] = in.readString(); - } - } + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -110,12 +133,33 @@ public void writeTo(StreamOutput out) throws IOException { rankingEvaluationSpec.writeTo(out); if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); } else { // writeStringArray uses writeVInt for size, we used writeInt in 6.2 out.writeInt(indices.length); for (String index : indices) { out.writeString(index); } + // no indices options yet + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; } + RankEvalRequest that = (RankEvalRequest) o; + return Objects.equals(indicesOptions, that.indicesOptions) && + Arrays.equals(indices, that.indices) && + Objects.equals(rankingEvaluationSpec, that.rankingEvaluationSpec); + } + + @Override + public int hashCode() { + return Objects.hash(indicesOptions, Arrays.hashCode(indices), rankingEvaluationSpec); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java index a596caf4f5c7b..34cf953ea50b7 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java @@ -108,7 +108,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } private static void parseRankEvalRequest(RankEvalRequest rankEvalRequest, RestRequest request, XContentParser parser) { - rankEvalRequest.setIndices(Strings.splitStringByCommaToArray(request.param("index"))); + rankEvalRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); RankEvalSpec spec = RankEvalSpec.parse(parser); rankEvalRequest.setRankEvalSpec(spec); } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index a4ce4c7ee92e7..d24a779fd61ce 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -75,8 +75,8 @@ public class TransportRankEvalAction extends HandledTransportAction { + + private static RankEvalPlugin rankEvalPlugin = new RankEvalPlugin(); + + @AfterClass + public static void releasePluginResources() throws IOException { + 
rankEvalPlugin.close(); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(rankEvalPlugin.getNamedXContent()); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(rankEvalPlugin.getNamedWriteables()); + } + + @Override + protected RankEvalRequest createTestInstance() { + int numberOfIndices = randomInt(3); + String[] indices = new String[numberOfIndices]; + for (int i=0; i < numberOfIndices; i++) { + indices[i] = randomAlphaOfLengthBetween(5, 10); + } + RankEvalRequest rankEvalRequest = new RankEvalRequest(RankEvalSpecTests.createTestItem(), indices); + IndicesOptions indicesOptions = IndicesOptions.fromOptions( + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + rankEvalRequest.indicesOptions(indicesOptions); + return rankEvalRequest; + } + + @Override + protected Reader instanceReader() { + return RankEvalRequest::new; + } + + @Override + protected RankEvalRequest mutateInstance(RankEvalRequest instance) throws IOException { + RankEvalRequest mutation = copyInstance(instance); + List mutators = new ArrayList<>(); + mutators.add(() -> mutation.indices(ArrayUtils.concat(instance.indices(), new String[] { randomAlphaOfLength(10) }))); + mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(instance.indicesOptions(), + () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())))); + mutators.add(() -> mutation.setRankEvalSpec(RankEvalSpecTests.mutateTestItem(instance.getRankEvalSpec()))); + randomFrom(mutators).run(); + return mutation; + } +} diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java index 26611679f3494..94338e570a5d2 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java @@ -70,7 +70,7 @@ private static List randomList(Supplier randomSupplier) { return result; } - private static RankEvalSpec createTestItem() throws IOException { + static RankEvalSpec createTestItem() { Supplier metric = randomFrom(Arrays.asList( () -> PrecisionAtKTests.createTestItem(), () -> MeanReciprocalRankTests.createTestItem(), @@ -87,6 +87,9 @@ private static RankEvalSpec createTestItem() throws IOException { builder.field("field", randomAlphaOfLengthBetween(1, 5)); builder.endObject(); script = Strings.toString(builder); + } catch (IOException e) { + // this shouldn't happen in tests, re-throw just not to swallow it + throw new RuntimeException(e); } templates = new HashSet<>(); @@ -156,7 +159,7 @@ public void testEqualsAndHash() throws IOException { checkEqualsAndHashCode(createTestItem(), RankEvalSpecTests::copy, RankEvalSpecTests::mutateTestItem); } - private static RankEvalSpec mutateTestItem(RankEvalSpec original) { + static RankEvalSpec mutateTestItem(RankEvalSpec original) { List ratedRequests = new ArrayList<>(original.getRatedRequests()); EvaluationMetric metric = original.getMetric(); Map templates = new HashMap<>(original.getTemplates()); diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java 
b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java index 178d429ca9ffd..50860ddd87b21 100644 --- a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java +++ b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java @@ -102,7 +102,7 @@ public void testPrecisionAtRequest() throws IOException { RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest()); builder.setRankEvalSpec(task); - RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().setIndices("test")).actionGet(); + RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().indices("test")).actionGet(); assertEquals(0.9, response.getEvaluationResult(), Double.MIN_VALUE); } From 6c3278b8e835380bbc1cf129cce9ab34fcff14d5 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 22 Mar 2018 12:02:53 +0100 Subject: [PATCH 10/27] [Docs] Fix missing closing block in cluster/misc.asciidoc --- docs/reference/modules/cluster/misc.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 4edcd34e00f5d..837cfcc43ebf7 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -82,3 +82,4 @@ Enable or disable allocation for persistent tasks: This setting does not affect the persistent tasks that are already being executed. Only newly created persistent tasks, or tasks that must be reassigned (after a node left the cluster, for example), are impacted by this setting. +-- From 7d1de890b818ee31bd38de27a01412d3a0be497e Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Thu, 22 Mar 2018 08:18:55 -0600 Subject: [PATCH 11/27] Decouple more classes from XContentBuilder and make builder strict (#29197) This commit decouples `BytesRef`, `Releasable`, and `TimeValue` from XContentBuilder, and paves the way for decoupling `ByteSizeValue` as well. It moves much of the Lucene and Joda encoding into a new SPI extension that is loaded by XContentBuilder to know how to encode these values. Part of doing this also allows us to make JSON encoding strict, as we no longer allow just any old object to be passed (in the past it was possible to get json that was `"field": "java.lang.Object@d8355a8"` if no one was careful about what was passed in).
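To make the stricter contract concrete, here is a minimal sketch (not part of the patch; the class and enum below are illustrative) of what the builder now accepts and rejects:

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class StrictBuilderSketch {
    enum Color { RED }

    public static void main(String[] args) throws Exception {
        // Enums are still written out via their toString() value.
        XContentBuilder ok = XContentFactory.jsonBuilder();
        ok.startObject().field("color", Color.RED).endObject(); // yields {"color":"RED"}

        // An arbitrary object with no registered writer now fails fast
        // instead of silently serializing as "java.lang.Object@...".
        XContentBuilder strict = XContentFactory.jsonBuilder();
        strict.startObject();
        try {
            strict.field("oops", new Object()); // throws IllegalArgumentException
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}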
Relates to #28504 --- .../main/java/org/elasticsearch/Version.java | 9 ++- .../common/bytes/BytesReference.java | 9 ++- .../common/document/DocumentField.java | 7 +- .../org/elasticsearch/common/text/Text.java | 4 +- .../common/transport/TransportAddress.java | 10 ++- .../common/unit/ByteSizeValue.java | 10 ++- .../elasticsearch/common/unit/TimeValue.java | 10 ++- .../common/xcontent/XContentBuilder.java | 59 ++++---------- .../XContentElasticsearchExtension.java | 78 +++++++++++++++++++ .../common/xcontent/XContentParser.java | 4 +- .../elasticsearch/index/shard/ShardId.java | 10 ++- .../BlobStoreIndexShardSnapshot.java | 3 +- .../DateHistogramValuesSourceBuilder.java | 2 +- .../histogram/DateHistogramInterval.java | 10 ++- .../ValuesSourceAggregationBuilder.java | 2 +- ...h.common.xcontent.XContentBuilderExtension | 1 + .../common/xcontent/BaseXContentTestCase.java | 6 +- .../fielddata/BinaryDVFieldDataTests.java | 2 +- 18 files changed, 171 insertions(+), 65 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java create mode 100644 server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 0f6e79f26ec39..93683259c8080 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -25,6 +25,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.monitor.jvm.JvmInfo; import java.io.IOException; @@ -34,7 +36,7 @@ import java.util.Collections; import java.util.List; -public class Version implements Comparable { +public class Version implements Comparable, ToXContentFragment { /* * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA * values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 @@ -418,6 +420,11 @@ public int compareTo(Version other) { return Integer.compare(this.id, other.id); } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } + /* * We need the declared versions when computing the minimum compatibility version. As computing the declared versions uses reflection it * is not cheap. 
Since computing the minimum compatibility version can occur often, we use this holder to compute the declared versions diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java index b9b6bce7969c4..abf832296c069 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java @@ -23,6 +23,7 @@ import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.io.stream.BytesStream; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.ByteArrayOutputStream; @@ -37,7 +38,7 @@ /** * A reference to bytes. */ -public abstract class BytesReference implements Accountable, Comparable { +public abstract class BytesReference implements Accountable, Comparable, ToXContentFragment { private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it @@ -334,4 +335,10 @@ public long skip(long n) throws IOException { return input.skip(n); } } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + BytesRef bytes = toBytesRef(); + return builder.value(bytes.bytes, bytes.offset, bytes.length); + } } diff --git a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java index c9236ea7840b1..f7747c9da254d 100644 --- a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java +++ b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.document; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -127,11 +128,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws // Stored fields values are converted using MappedFieldType#valueForDisplay. // As a result they can either be Strings, Numbers, or Booleans, that's // all. - if (value instanceof BytesReference) { - builder.binaryValue(((BytesReference) value).toBytesRef()); - } else { - builder.value(value); - } + builder.value(value); } builder.endArray(); return builder; diff --git a/server/src/main/java/org/elasticsearch/common/text/Text.java b/server/src/main/java/org/elasticsearch/common/text/Text.java index 45a1c2d630672..bc0674d0b33c2 100644 --- a/server/src/main/java/org/elasticsearch/common/text/Text.java +++ b/server/src/main/java/org/elasticsearch/common/text/Text.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.common.text; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; @@ -125,7 +126,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } else { // TODO: TextBytesOptimization we can use a buffer here to convert it? maybe add a // request to jackson to support InputStream as well? 
- return builder.utf8Value(this.bytes().toBytesRef()); + BytesRef br = this.bytes().toBytesRef(); + return builder.utf8Value(br.bytes, br.offset, br.length); } } } diff --git a/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java b/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java index a565d8b49d8a3..3127096c80a5b 100644 --- a/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java +++ b/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java @@ -23,6 +23,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.net.InetAddress; @@ -32,7 +35,7 @@ /** * A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}). */ -public final class TransportAddress implements Writeable { +public final class TransportAddress implements Writeable, ToXContentFragment { /** * A non-routeable v4 meta transport address that can be used for @@ -128,4 +131,9 @@ public int hashCode() { public String toString() { return NetworkAddress.format(address); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index 0981d0c4d7298..1281a982b725c 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -27,12 +27,15 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Locale; import java.util.Objects; -public class ByteSizeValue implements Writeable, Comparable { +public class ByteSizeValue implements Writeable, Comparable, ToXContentFragment { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ByteSizeValue.class)); private final long size; @@ -269,4 +272,9 @@ public int compareTo(ByteSizeValue other) { long otherValue = other.size * other.unit.toBytes(1); return Long.compare(thisValue, otherValue); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java index 0f6eabed1e3de..abd62adaa0e3e 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -24,6 +24,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import 
org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.joda.time.Period; import org.joda.time.PeriodType; import org.joda.time.format.PeriodFormat; @@ -40,7 +43,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -public class TimeValue implements Writeable, Comparable { +public class TimeValue implements Writeable, Comparable, ToXContentFragment { /** How many nano-seconds in one milli-second */ public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS); @@ -398,4 +401,9 @@ public int compareTo(TimeValue timeValue) { double otherValue = ((double) timeValue.duration) * timeValue.timeUnit.toNanos(1); return Double.compare(thisValue, otherValue); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index b51add28bf539..a02733e551e2d 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -19,10 +19,7 @@ package org.elasticsearch.common.xcontent; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; import org.joda.time.DateTimeZone; import org.joda.time.ReadableInstant; @@ -30,11 +27,13 @@ import org.joda.time.format.ISODateTimeFormat; import java.io.ByteArrayOutputStream; +import java.io.Closeable; import java.io.Flushable; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.file.Path; +import java.time.ZonedDateTime; import java.util.Arrays; import java.util.Calendar; import java.util.Collections; @@ -49,7 +48,7 @@ /** * A utility to build XContent (ie json). */ -public final class XContentBuilder implements Releasable, Flushable { +public final class XContentBuilder implements Closeable, Flushable { /** * Create a new {@link XContentBuilder} using the given {@link XContent} content. 
@@ -91,7 +90,6 @@ public static XContentBuilder builder(XContent xContent, Set includes, S writers.put(Boolean.class, (b, v) -> b.value((Boolean) v)); writers.put(Byte.class, (b, v) -> b.value((Byte) v)); writers.put(byte[].class, (b, v) -> b.value((byte[]) v)); - writers.put(BytesRef.class, (b, v) -> b.binaryValue((BytesRef) v)); writers.put(Date.class, (b, v) -> b.value((Date) v)); writers.put(Double.class, (b, v) -> b.value((Double) v)); writers.put(double[].class, (b, v) -> b.values((double[]) v)); @@ -105,12 +103,12 @@ public static XContentBuilder builder(XContent xContent, Set includes, S writers.put(short[].class, (b, v) -> b.values((short[]) v)); writers.put(String.class, (b, v) -> b.value((String) v)); writers.put(String[].class, (b, v) -> b.values((String[]) v)); + writers.put(Locale.class, (b, v) -> b.value(v.toString())); + writers.put(Class.class, (b, v) -> b.value(v.toString())); + writers.put(ZonedDateTime.class, (b, v) -> b.value(v.toString())); Map, HumanReadableTransformer> humanReadableTransformer = new HashMap<>(); - // These will be moved to a different class at a later time to decouple them from XContentBuilder - humanReadableTransformer.put(TimeValue.class, v -> ((TimeValue) v).millis()); - humanReadableTransformer.put(ByteSizeValue.class, v -> ((ByteSizeValue) v).getBytes()); // Load pluggable extensions for (XContentBuilderExtension service : ServiceLoader.load(XContentBuilderExtension.class)) { @@ -613,49 +611,25 @@ public XContentBuilder value(byte[] value, int offset, int length) throws IOExce } /** - * Writes the binary content of the given {@link BytesRef}. - * - * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back - */ - public XContentBuilder field(String name, BytesRef value) throws IOException { - return field(name).binaryValue(value); - } - - /** - * Writes the binary content of the given {@link BytesRef} as UTF-8 bytes. + * Writes the binary content of the given byte array as UTF-8 bytes. * * Use {@link XContentParser#charBuffer()} to read the value back */ - public XContentBuilder utf8Field(String name, BytesRef value) throws IOException { - return field(name).utf8Value(value); - } - - /** - * Writes the binary content of the given {@link BytesRef}. - * - * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back - */ - public XContentBuilder binaryValue(BytesRef value) throws IOException { - if (value == null) { - return nullValue(); - } - value(value.bytes, value.offset, value.length); - return this; + public XContentBuilder utf8Field(String name, byte[] bytes, int offset, int length) throws IOException { + return field(name).utf8Value(bytes, offset, length); } /** - * Writes the binary content of the given {@link BytesRef} as UTF-8 bytes. + * Writes the binary content of the given byte array as UTF-8 bytes. 
* * Use {@link XContentParser#charBuffer()} to read the value back */ - public XContentBuilder utf8Value(BytesRef value) throws IOException { - if (value == null) { - return nullValue(); - } - generator.writeUTF8String(value.bytes, value.offset, value.length); + public XContentBuilder utf8Value(byte[] bytes, int offset, int length) throws IOException { + generator.writeUTF8String(bytes, offset, length); return this; } + //////////////////////////////////////////////////////////////////////////// // Date ////////////////////////////////// @@ -793,10 +767,11 @@ private void unknownValue(Object value, boolean ensureNoSelfReferences) throws I value((ReadableInstant) value); } else if (value instanceof ToXContent) { value((ToXContent) value); - } else { - // This is a "value" object (like enum, DistanceUnit, etc) just toString() it - // (yes, it can be misleading when toString a Java class, but really, jackson should be used in that case) + } else if (value instanceof Enum) { + // Write out the Enum toString value(Objects.toString(value)); + } else { + throw new IllegalArgumentException("cannot write xcontent for unknown value of type " + value.getClass()); } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java new file mode 100644 index 0000000000000..1c852c68960a7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.joda.time.DateTimeZone; +import org.joda.time.tz.CachedDateTimeZone; +import org.joda.time.tz.FixedDateTimeZone; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * SPI extensions for Elasticsearch-specific classes (like the Lucene or Joda + * dependency classes) that need to be encoded by {@link XContentBuilder} in a + * specific way. 
+ */ +public class XContentElasticsearchExtension implements XContentBuilderExtension { + + @Override + public Map, XContentBuilder.Writer> getXContentWriters() { + Map, XContentBuilder.Writer> writers = new HashMap<>(); + + // Fully-qualified here to reduce ambiguity around our (ES') Version class + writers.put(org.apache.lucene.util.Version.class, (b, v) -> b.value(Objects.toString(v))); + writers.put(DateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); + writers.put(CachedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); + writers.put(FixedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); + + writers.put(BytesReference.class, (b, v) -> { + if (v == null) { + b.nullValue(); + } else { + BytesRef bytes = ((BytesReference) v).toBytesRef(); + b.value(bytes.bytes, bytes.offset, bytes.length); + } + }); + + writers.put(BytesRef.class, (b, v) -> { + if (v == null) { + b.nullValue(); + } else { + BytesRef bytes = (BytesRef) v; + b.value(bytes.bytes, bytes.offset, bytes.length); + } + }); + return writers; + } + + @Override + public Map, XContentBuilder.HumanReadableTransformer> getXContentHumanReadableTransformers() { + Map, XContentBuilder.HumanReadableTransformer> transformers = new HashMap<>(); + transformers.put(TimeValue.class, v -> ((TimeValue) v).millis()); + transformers.put(ByteSizeValue.class, v -> ((ByteSizeValue) v).getBytes()); + return transformers; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index a645bf81da343..06cc10713bec5 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -228,7 +228,6 @@ enum NumberType { * Reads a plain binary value that was written via one of the following methods: * *

     * <ul>
-    *     <li>{@link XContentBuilder#field(String, org.apache.lucene.util.BytesRef)}</li>
     *     <li>{@link XContentBuilder#field(String, byte[], int, int)}}</li>
     *     <li>{@link XContentBuilder#field(String, byte[])}}</li>
     * </ul>
@@ -236,8 +235,7 @@ enum NumberType { * as well as via their String variants of the separated value methods. * Note: Do not use this method to read values written with: * <ul>
-    *     <li>{@link XContentBuilder#utf8Field(String, org.apache.lucene.util.BytesRef)}</li>
-    *     <li>{@link XContentBuilder#utf8Field(String, org.apache.lucene.util.BytesRef)}</li>
+    *     <li>{@link XContentBuilder#utf8Field(String, byte[], int, int)}</li>
     * </ul>
* * these methods write UTF-8 encoded strings and must be read through: diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardId.java b/server/src/main/java/org/elasticsearch/index/shard/ShardId.java index a806c414e9aea..085fd6e339282 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardId.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardId.java @@ -23,6 +23,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.Index; import java.io.IOException; @@ -30,7 +33,7 @@ /** * Allows for shard level components to be injected with the shard id. */ -public class ShardId implements Streamable, Comparable { +public class ShardId implements Streamable, Comparable, ToXContentFragment { private Index index; @@ -137,4 +140,9 @@ public int compareTo(ShardId o) { } return Integer.compare(shardId, o.getId()); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 124b538d3facf..ee285cc4f9569 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -266,7 +266,8 @@ public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent } if (file.metadata.hash() != null && file.metadata().hash().length > 0) { - builder.field(META_HASH, file.metadata.hash()); + BytesRef br = file.metadata.hash(); + builder.field(META_HASH, br.bytes, br.offset, br.length); } builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index b7abf82a58ea3..caa16b7a9d57f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -120,7 +120,7 @@ protected void doXContentBody(XContentBuilder builder, Params params) throws IOE builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString()); } if (timeZone != null) { - builder.field("time_zone", timeZone); + builder.field("time_zone", timeZone.toString()); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java index 9310142aa9c41..9b34739b96d6e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java @@ -22,6 +22,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; @@ -29,7 +32,7 @@ /** * The interval the date histogram is based on. */ -public class DateHistogramInterval implements Writeable { +public class DateHistogramInterval implements Writeable, ToXContentFragment { public static final DateHistogramInterval SECOND = new DateHistogramInterval("1s"); public static final DateHistogramInterval MINUTE = new DateHistogramInterval("1m"); @@ -100,4 +103,9 @@ public boolean equals(Object obj) { DateHistogramInterval other = (DateHistogramInterval) obj; return Objects.equals(expression, other.expression); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index eb81d5a9b6b7e..81b6d23f3873d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -338,7 +338,7 @@ public final XContentBuilder internalXContent(XContentBuilder builder, Params pa builder.field("format", format); } if (timeZone != null) { - builder.field("time_zone", timeZone); + builder.field("time_zone", timeZone.toString()); } if (valueType != null) { builder.field("value_type", valueType.getPreferredName()); diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension b/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension new file mode 100644 index 0000000000000..841c2e60d3d82 --- /dev/null +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension @@ -0,0 +1 @@ +org.elasticsearch.common.xcontent.XContentElasticsearchExtension diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index e74d3b7acea97..8f7a177fae720 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -326,14 +326,14 @@ public void testBinaryValueWithOffsetLength() throws Exception { } public void testBinaryUTF8() throws Exception { - assertResult("{'utf8':null}", () -> builder().startObject().utf8Field("utf8", null).endObject()); + assertResult("{'utf8':null}", () -> builder().startObject().nullField("utf8").endObject()); final BytesRef randomBytesRef = new BytesRef(randomBytes()); XContentBuilder builder = builder().startObject(); if (randomBoolean()) { - builder.utf8Field("utf8", randomBytesRef); + builder.utf8Field("utf8", randomBytesRef.bytes, randomBytesRef.offset, randomBytesRef.length); } else { - builder.field("utf8").utf8Value(randomBytesRef); + builder.field("utf8").utf8Value(randomBytesRef.bytes, randomBytesRef.offset, randomBytesRef.length); } builder.endObject(); diff --git 
a/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java index 3d811832d2951..7f407dd1c01d1 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java @@ -68,7 +68,7 @@ public void testDocValue() throws Exception { writer.addDocument(d.rootDoc()); BytesRef bytes1 = randomBytes(); - doc = XContentFactory.jsonBuilder().startObject().field("field", bytes1).endObject(); + doc = XContentFactory.jsonBuilder().startObject().field("field", bytes1.bytes, bytes1.offset, bytes1.length).endObject(); d = mapper.parse(SourceToParse.source("test", "test", "2", BytesReference.bytes(doc), XContentType.JSON)); writer.addDocument(d.rootDoc()); From 98f89c3952b4ac68969e5b7f8c2650cea77d652f Mon Sep 17 00:00:00 2001 From: tomcallahan Date: Thu, 22 Mar 2018 10:20:18 -0400 Subject: [PATCH 12/27] Remove license information from README.textile (#29198) Remove license information from README.textile in preparation for the inclusion of X-Pack into the main elasticsearch repository. This is necessary to avoid confusion around licensing, as the entire repository will no longer be Apache 2.0. --- README.textile | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/README.textile b/README.textile index c964e31655dc8..ce7b3b7d34476 100644 --- a/README.textile +++ b/README.textile @@ -27,7 +27,6 @@ Elasticsearch is a distributed RESTful search engine built for the cloud. Featur ** All the power of Lucene easily exposed through simple configuration / plugins. * Per operation consistency ** Single document level operations are atomic, consistent, isolated and durable. -* Open Source under the Apache License, version 2 ("ALv2") h2. Getting Started @@ -217,23 +216,3 @@ Elasticsearch (1.x), it is required to perform a full cluster restart. Please see the "setup reference": https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html for more details on the upgrade process. - -h1. License - -<pre>
-This software is licensed under the Apache License, version 2 ("ALv2"), quoted below.
-
-Copyright 2009-2016 Elasticsearch 
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not
-use this file except in compliance with the License. You may obtain a copy of
-the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations under
-the License.
-</pre>
From 24c8d8f5ef0886a1799213841610eec038d4872f Mon Sep 17 00:00:00 2001 From: Yu Date: Thu, 22 Mar 2018 17:17:16 +0100 Subject: [PATCH 13/27] REST high-level client: add force merge API (#28896) Relates to #27205 --- .../elasticsearch/client/IndicesClient.java | 28 ++++- .../org/elasticsearch/client/Request.java | 12 +++ .../elasticsearch/client/IndicesClientIT.java | 28 +++++ .../elasticsearch/client/RequestTests.java | 38 +++++++ .../IndicesClientDocumentationIT.java | 76 ++++++++++++- .../high-level/indices/force_merge.asciidoc | 102 ++++++++++++++++++ .../high-level/supported-apis.asciidoc | 2 + .../forcemerge/ForceMergeResponse.java | 18 ++++ .../admin/indices/RestForceMergeAction.java | 3 +- .../forcemerge/ForceMergeResponseTests.java | 39 +++++++ 10 files changed, 341 insertions(+), 5 deletions(-) create mode 100644 docs/java-rest/high-level/indices/force_merge.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 0b366aa99e188..f5b46a6a53192 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -34,15 +34,17 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; -import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; @@ -261,6 +263,28 @@ public void flushAsync(FlushRequest flushRequest, ActionListener listener, emptySet(), headers); } + /** + * Force merge one or more indices using the Force Merge API + *

+ * See + * Force Merge API on elastic.co + */ + public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent, + emptySet(), headers); + } + + /** + * Asynchronously force merge one or more indices using the Force Merge API + *

+ * See + * Force Merge API on elastic.co + */ + public void forceMergeAsync(ForceMergeRequest forceMergeRequest, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent, + listener, emptySet(), headers); + } + /** * Clears the cache of one or more indices using the Clear Cache API *

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index 0d8057060efeb..802b1492be092 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -233,6 +234,17 @@ static Request flush(FlushRequest flushRequest) { return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); } + static Request forceMerge(ForceMergeRequest forceMergeRequest) { + String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices(); + String endpoint = endpoint(indices, "_forcemerge"); + Params parameters = Params.builder(); + parameters.withIndicesOptions(forceMergeRequest.indicesOptions()); + parameters.putParam("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); + parameters.putParam("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); + parameters.putParam("flush", Boolean.toString(forceMergeRequest.flush())); + return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + } + static Request clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest) { String[] indices = clearIndicesCacheRequest.indices() == null ? 
Strings.EMPTY_ARRAY :clearIndicesCacheRequest.indices(); String endpoint = endpoint(indices, "_cache/clear"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 8a2ba44791149..7a29a35d20ab1 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -38,6 +38,8 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -467,6 +469,32 @@ public void testClearCache() throws IOException { } } + public void testForceMerge() throws IOException { + { + String index = "index"; + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(index, settings); + ForceMergeRequest forceMergeRequest = new ForceMergeRequest(index); + ForceMergeResponse forceMergeResponse = + execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync); + assertThat(forceMergeResponse.getTotalShards(), equalTo(1)); + assertThat(forceMergeResponse.getSuccessfulShards(), equalTo(1)); + assertThat(forceMergeResponse.getFailedShards(), equalTo(0)); + assertThat(forceMergeResponse.getShardFailures(), equalTo(BroadcastResponse.EMPTY)); + } + { + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); + ForceMergeRequest forceMergeRequest = new ForceMergeRequest(nonExistentIndex); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + } + } + public void testExistsAlias() throws IOException { GetAliasesRequest getAliasesRequest = new GetAliasesRequest("alias"); assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index f79135c44f5ec..75ac543fbb4ce 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -621,6 
+622,43 @@ public void testFlush() { assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); } + public void testForceMerge() { + String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); + ForceMergeRequest forceMergeRequest; + if (randomBoolean()) { + forceMergeRequest = new ForceMergeRequest(indices); + } else { + forceMergeRequest = new ForceMergeRequest(); + forceMergeRequest.indices(indices); + } + + Map expectedParams = new HashMap<>(); + setRandomIndicesOptions(forceMergeRequest::indicesOptions, forceMergeRequest::indicesOptions, expectedParams); + if (randomBoolean()) { + forceMergeRequest.maxNumSegments(randomInt()); + } + expectedParams.put("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); + if (randomBoolean()) { + forceMergeRequest.onlyExpungeDeletes(randomBoolean()); + } + expectedParams.put("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); + if (randomBoolean()) { + forceMergeRequest.flush(randomBoolean()); + } + expectedParams.put("flush", Boolean.toString(forceMergeRequest.flush())); + + Request request = Request.forceMerge(forceMergeRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + endpoint.add(String.join(",", indices)); + } + endpoint.add("_forcemerge"); + assertThat(request.getEndpoint(), equalTo(endpoint.toString())); + assertThat(request.getParameters(), equalTo(expectedParams)); + assertThat(request.getEntity(), nullValue()); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + } + public void testClearCache() { String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); ClearIndicesCacheRequest clearIndicesCacheRequest; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index fb9e56d222022..bc6946eb2dc7f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -37,6 +37,8 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -771,6 +773,79 @@ public void onFailure(Exception e) { } } + public void testForceMergeIndex() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + createIndex("index", Settings.EMPTY); + } + + { + // tag::force-merge-request + ForceMergeRequest request = new ForceMergeRequest("index1"); // <1> + ForceMergeRequest requestMultiple = new ForceMergeRequest("index1", "index2"); // <2> + ForceMergeRequest requestAll = new ForceMergeRequest(); // <3> + // end::force-merge-request + + // tag::force-merge-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::force-merge-request-indicesOptions + + // 
tag::force-merge-request-segments-num + request.maxNumSegments(1); // <1> + // end::force-merge-request-segments-num + + // tag::force-merge-request-only-expunge-deletes + request.onlyExpungeDeletes(true); // <1> + // end::force-merge-request-only-expunge-deletes + + // tag::force-merge-request-flush + request.flush(true); // <1> + // end::force-merge-request-flush + + // tag::force-merge-execute + ForceMergeResponse forceMergeResponse = client.indices().forceMerge(request); + // end::force-merge-execute + + // tag::force-merge-response + int totalShards = forceMergeResponse.getTotalShards(); // <1> + int successfulShards = forceMergeResponse.getSuccessfulShards(); // <2> + int failedShards = forceMergeResponse.getFailedShards(); // <3> + DefaultShardOperationFailedException[] failures = forceMergeResponse.getShardFailures(); // <4> + // end::force-merge-response + + // tag::force-merge-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ForceMergeResponse forceMergeResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::force-merge-execute-listener + + // tag::force-merge-execute-async + client.indices().forceMergeAsync(request, listener); // <1> + // end::force-merge-execute-async + } + { + // tag::force-merge-notfound + try { + ForceMergeRequest request = new ForceMergeRequest("does_not_exist"); + client.indices().forceMerge(request); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.NOT_FOUND) { + // <1> + } + } + // end::force-merge-notfound + } + } + public void testClearCache() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -855,7 +930,6 @@ public void onFailure(Exception e) { } } - public void testCloseIndex() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/docs/java-rest/high-level/indices/force_merge.asciidoc b/docs/java-rest/high-level/indices/force_merge.asciidoc new file mode 100644 index 0000000000000..6fe1fcd82b749 --- /dev/null +++ b/docs/java-rest/high-level/indices/force_merge.asciidoc @@ -0,0 +1,102 @@ +[[java-rest-high-force-merge]] +=== Force Merge API + +[[java-rest-high-force-merge-request]] +==== Force merge Request + +A `ForceMergeRequest` can be applied to one or more indices, or even on `_all` the indices: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request] +-------------------------------------------------- +<1> Force merge one index +<2> Force merge multiple indices +<3> Force merge all the indices + +==== Optional arguments + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-segments-num] +-------------------------------------------------- +<1> Set `max_num_segments` to control the number of segments to merge down to. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-only-expunge-deletes] +-------------------------------------------------- +<1> Set the `only_expunge_deletes` flag to `true` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-flush] +-------------------------------------------------- +<1> Set the `flush` flag to `true` + +[[java-rest-high-force-merge-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-execute] +-------------------------------------------------- + +[[java-rest-high-force-merge-async]] +==== Asynchronous Execution + +The asynchronous execution of a force merge request requires both the `ForceMergeRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-execute-async] +-------------------------------------------------- +<1> The `ForceMergeRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `ForceMergeResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. 
The raised exception is provided as an argument + +[[java-rest-high-force-merge-response]] +==== Force Merge Response + +The returned `ForceMergeResponse` allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-response] +-------------------------------------------------- +<1> Total number of shards hit by the force merge request +<2> Number of shards where the force merge has succeeded +<3> Number of shards where the force merge has failed +<4> A list of failures if the operation failed on one or more shards + +By default, if the indices were not found, an `ElasticsearchException` will be thrown: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-notfound] +-------------------------------------------------- +<1> Do something if the indices to be force merged were not found \ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index bea30690fe183..de5a3d6b6a656 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -60,6 +60,7 @@ Index Management:: * <> * <> * <> +* <> * <> Mapping Management:: @@ -79,6 +80,7 @@ include::indices/split_index.asciidoc[] include::indices/refresh.asciidoc[] include::indices/flush.asciidoc[] include::indices/clear_cache.asciidoc[] +include::indices/force_merge.asciidoc[] include::indices/rollover.asciidoc[] include::indices/put_mapping.asciidoc[] include::indices/update_aliases.asciidoc[] diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java index f77bb5d6a57de..6ebbbbd34cd5b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java @@ -21,7 +21,10 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import java.util.Arrays; import java.util.List; /** @@ -29,10 +32,25 @@ */ public class ForceMergeResponse extends BroadcastResponse { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("force_merge", + true, arg -> { + BroadcastResponse response = (BroadcastResponse) arg[0]; + return new ForceMergeResponse(response.getTotalShards(), response.getSuccessfulShards(), response.getFailedShards(), + Arrays.asList(response.getShardFailures())); + }); + + static { + declareBroadcastFields(PARSER); + } + ForceMergeResponse() { } ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); } + + public static ForceMergeResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java
index 79beb66d40b1b..394c4822f0ea8 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java
@@ -37,7 +37,6 @@ import static org.elasticsearch.rest.RestRequest.Method.POST;
 import static org.elasticsearch.rest.RestStatus.OK;
-import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader;
 
 public class RestForceMergeAction extends BaseRestHandler {
 
     public RestForceMergeAction(Settings settings, RestController controller) {
@@ -62,7 +61,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
             @Override
             public RestResponse buildResponse(ForceMergeResponse response, XContentBuilder builder) throws Exception {
                 builder.startObject();
-                buildBroadcastShardsHeader(builder, request, response);
+                response.toXContent(builder, request);
                 builder.endObject();
                 return new BytesRestResponse(OK, builder);
             }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java
new file mode 100644
index 0000000000000..f5e86fdcdfe9b
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.forcemerge;
+
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.AbstractBroadcastResponseTestCase;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.util.List;
+
+public class ForceMergeResponseTests extends AbstractBroadcastResponseTestCase {
+    @Override
+    protected ForceMergeResponse createTestInstance(int totalShards, int successfulShards, int failedShards,
+                                                    List failures) {
+        return new ForceMergeResponse(totalShards, successfulShards, failedShards, failures);
+    }
+
+    @Override
+    protected ForceMergeResponse doParseInstance(XContentParser parser) {
+        return ForceMergeResponse.fromXContent(parser);
+    }
+}
From c93c7f3121b321457081d1f965974c87acc02ca2 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi 
Date: Thu, 22 Mar 2018 18:37:08 +0100
Subject: [PATCH 14/27] Remove deprecated options for query_string (#29203)

This commit removes the `query_string` parameters deprecated in 6.x (or 5.x):
`use_dismax`, `split_on_whitespace`, `all_fields`, `locale`,
`auto_generate_phrase_queries` and `lowercase_expanded_terms`.
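
For reference, a minimal migration sketch (illustrative only, not part of
this change; it assumes the standard `QueryBuilders` helpers): a query that
previously relied on the removed `all_fields` option can target every mapped
field through `default_field`, and `tie_breaker` tunes the dis_max scoring
that is now always used:

    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.index.query.QueryStringQueryBuilder;

    // Query all mapped fields instead of using the removed `all_fields` option.
    QueryStringQueryBuilder query = QueryBuilders.queryStringQuery("quick brown fox")
        .defaultField("*")    // replaces `all_fields: true`
        .tieBreaker(0.3f);    // dis_max scoring is always used now; 0.3f is an arbitrary example value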
Closes #25551
---
 .../migration/migrate_7_0/search.asciidoc     |  4 ++
 .../query-dsl/query-string-query.asciidoc     |  6 --
 .../index/query/QueryStringQueryBuilder.java  | 58 +++----
 3 files changed, 13 insertions(+), 55 deletions(-)

diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc
index c55ad8c424057..0d3770993b2ff 100644
--- a/docs/reference/migration/migrate_7_0/search.asciidoc
+++ b/docs/reference/migration/migrate_7_0/search.asciidoc
@@ -5,6 +5,10 @@
 * The default value for `transpositions` parameter of `fuzzy` query
   has been changed to `true`.
 
+* The `query_string` options `use_dismax`, `split_on_whitespace`,
+  `all_fields`, `locale`, `auto_generate_phrase_queries` and
+  `lowercase_expanded_terms` deprecated in 6.x have been removed.
+
 ==== Adaptive replica selection enabled by default
 
 Adaptive replica selection has been enabled by default. If you wish to return to
diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc
index 40b0978cc1977..ecefee7757548 100644
--- a/docs/reference/query-dsl/query-string-query.asciidoc
+++ b/docs/reference/query-dsl/query-string-query.asciidoc
@@ -99,8 +99,6 @@ phrase matches are required. Default value is `0`.
 
 |`boost` |Sets the boost value of the query. Defaults to `1.0`.
 
-|`auto_generate_phrase_queries` |Defaults to `false`.
-
 |`analyze_wildcard` |By default, wildcards terms in a query string are
 not analyzed. By setting this value to `true`, a best effort will be
 made to analyze those as well.
@@ -129,10 +127,6 @@ comprehensive example.
 |`auto_generate_synonyms_phrase_query` |Whether phrase queries should be automatically generated
 for multi terms synonyms. Defaults to `true`.
 
-|`all_fields` | deprecated[6.0.0, set `default_field` to `*` instead]
-Perform the query on all fields detected in the mapping that can
-be queried.
-
 |=======================================================================
 
 When a multi term query is being generated, one can control how it gets
diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
index 56c49b7f2c1bf..4ce8aae52c133 100644
--- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
@@ -78,15 +78,9 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder
Date: Thu, 22 Mar 2018 14:31:15 -0400
Subject: [PATCH 15/27] Harden periodic flush check to avoid endless flush loop (#29125)

In #28350, we fixed an endless flushing loop which may happen on
replicas by tightening the relation between the flush action and the
periodically flush condition.

1. The periodically flush condition is enabled only if it is disabled
   after a flush.

2. If the periodically flush condition is enabled, then a flush will
   actually happen regardless of Lucene state.

(1) and (2) guarantee that a flushing loop will be terminated. Sadly,
condition 1 can be violated in edge cases because we used two different
algorithms to evaluate the current and future uncommitted translog size.

- We use the method `uncommittedSizeInBytes` to calculate the current
  uncommitted size. It is the sum of translogs whose generation is at
  least the minGen (determined by a given seqno). We pick a continuous
  range of translogs starting from the minGen to evaluate the current
  uncommitted size.
- We use the method `sizeOfGensAboveSeqNoInBytes` to calculate the
  future uncommitted size. It is the sum of translogs whose maxSeqNo is
  at least the given seqNo. Here we don't pick a range but select
  translogs one by one. Suppose we have 3 translogs `gen1={#1,#2},
  gen2={}, gen3={#3}` and `seqno=#1`; `uncommittedSizeInBytes` is the
  sum of gen1, gen2, and gen3 while `sizeOfGensAboveSeqNoInBytes` is
  the sum of gen1 and gen3. Gen2 is excluded because its maxSeqNo is
  still -1.

This commit removes both the `sizeOfGensAboveSeqNoInBytes` and
`uncommittedSizeInBytes` methods, then enforces the engine to use only
the `sizeInBytesByMinGen` method to evaluate the periodically flush
condition.

Closes #29097
Relates #28350
---
 .../index/engine/InternalEngine.java          | 30 ++++++---
 .../index/translog/Translog.java              | 38 +++---------
 .../translog/TranslogDeletionPolicy.java      |  1 -
 .../index/engine/EngineDiskUtilsTests.java    |  2 +-
 .../index/engine/InternalEngineTests.java     | 62 +++++++++++++++----
 .../index/shard/IndexShardIT.java             | 20 +++---
 .../index/translog/TranslogTests.java         |  6 +-
 .../indices/recovery/RecoveryTests.java       |  2 +-
 8 files changed, 94 insertions(+), 67 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 49be68efcad5d..6c6752de7d2d4 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -1361,7 +1361,8 @@ final boolean tryRenewSyncCommit() {
             ensureOpen();
             ensureCanFlush();
             String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID);
-            if (syncId != null && translog.uncommittedOperations() == 0 && indexWriter.hasUncommittedChanges()) {
+            long translogGenOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY));
+            if (syncId != null && indexWriter.hasUncommittedChanges() && translog.totalOperationsByMinGen(translogGenOfLastCommit) == 0) {
                 logger.trace("start renewing sync commit [{}]", syncId);
                 commitIndexWriter(indexWriter, translog, syncId);
                 logger.debug("successfully sync committed. sync id [{}].", syncId);
@@ -1383,19 +1384,30 @@ final boolean tryRenewSyncCommit() {
     @Override
     public boolean shouldPeriodicallyFlush() {
         ensureOpen();
+        final long translogGenerationOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY));
         final long flushThreshold = config().getIndexSettings().getFlushThresholdSize().getBytes();
-        final long uncommittedSizeOfCurrentCommit = translog.uncommittedSizeInBytes();
-        if (uncommittedSizeOfCurrentCommit < flushThreshold) {
+        if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThreshold) {
             return false;
         }
         /*
-         * We should only flush ony if the shouldFlush condition can become false after flushing.
-         * This condition will change if the `uncommittedSize` of the new commit is smaller than
-         * the `uncommittedSize` of the current commit. This method is to maintain translog only,
-         * thus the IndexWriter#hasUncommittedChanges condition is not considered.
+         * We flush to reduce the size of the uncommitted translog but strictly speaking the uncommitted size won't always be
+         * below the flush-threshold after a flush. To avoid getting into an endless loop of flushing, we only enable the
+         * periodically flush condition if this condition is disabled after a flush. The condition will change if the new
+         * commit points to a later generation than the last commit's (eg. gen-of-last-commit < gen-of-new-commit) [1].
+         *
+         * When the local checkpoint equals max_seqno, and the translog-gen of the last commit equals the translog-gen of
+         * the new commit, we know that the last generation must contain operations because its size is above the flush
+         * threshold and the flush-threshold is guaranteed to be higher than an empty translog by the setting validation.
+         * This guarantees that the new commit will point to the newly rolled generation. In fact, this scenario only
+         * happens when the generation-threshold is close to or above the flush-threshold; otherwise we have rolled
+         * generations as the generation-threshold was reached, then the first condition (eg. [1]) is already satisfied.
+         *
+         * This method is to maintain translog only, thus the IndexWriter#hasUncommittedChanges condition is not considered.
          */
-        final long uncommittedSizeOfNewCommit = translog.sizeOfGensAboveSeqNoInBytes(localCheckpointTracker.getCheckpoint() + 1);
-        return uncommittedSizeOfNewCommit < uncommittedSizeOfCurrentCommit;
+        final long translogGenerationOfNewCommit =
+            translog.getMinGenerationForSeqNo(localCheckpointTracker.getCheckpoint() + 1).translogFileGeneration;
+        return translogGenerationOfLastCommit < translogGenerationOfNewCommit
+            || localCheckpointTracker.getCheckpoint() == localCheckpointTracker.getMaxSeqNo();
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
index c34f851195a9f..c2d494fd07a34 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -356,26 +356,11 @@ public long getMinFileGeneration() {
         }
     }
 
-
-    /**
-     * Returns the number of operations in the translog files that aren't committed to lucene.
-     */
-    public int uncommittedOperations() {
-        return totalOperations(deletionPolicy.getTranslogGenerationOfLastCommit());
-    }
-
-    /**
-     * Returns the size in bytes of the translog files that aren't committed to lucene.
-     */
-    public long uncommittedSizeInBytes() {
-        return sizeInBytesByMinGen(deletionPolicy.getTranslogGenerationOfLastCommit());
-    }
-
     /**
      * Returns the number of operations in the translog files
     */
     public int totalOperations() {
-        return totalOperations(-1);
+        return totalOperationsByMinGen(-1);
     }
 
     /**
@@ -406,9 +391,9 @@ static long findEarliestLastModifiedAge(long currentTime, Iterable {});
         assertTrue(shard.shouldPeriodicallyFlush());
         final Translog translog = shard.getEngine().getTranslog();
-        assertEquals(2, translog.uncommittedOperations());
+        assertEquals(2, translog.stats().getUncommittedOperations());
         client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON)
             .setRefreshPolicy(randomBoolean() ?
IMMEDIATE : NONE).get(); assertBusy(() -> { // this is async assertFalse(shard.shouldPeriodicallyFlush()); }); - assertEquals(0, translog.uncommittedOperations()); + assertEquals(0, translog.stats().getUncommittedOperations()); translog.sync(); - long size = Math.max(translog.uncommittedSizeInBytes(), Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1); - logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), - translog.uncommittedOperations(), translog.getGeneration()); + long size = Math.max(translog.stats().getUncommittedSizeInBytes(), Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1); + logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", + translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put( IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES)) .build()).get(); client().prepareDelete("test", "test", "2").get(); - logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), - translog.uncommittedOperations(), translog.getGeneration()); + logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", + translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration()); assertBusy(() -> { // this is async - logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), - translog.uncommittedOperations(), translog.getGeneration()); + logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", + translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration()); assertFalse(shard.shouldPeriodicallyFlush()); }); - assertEquals(0, translog.uncommittedOperations()); + assertEquals(0, translog.stats().getUncommittedOperations()); } public void testMaybeRollTranslogGeneration() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index c18784873a472..1fb36486c2b3f 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -501,10 +501,10 @@ public void testUncommittedOperations() throws Exception { translog.rollGeneration(); operationsInLastGen = 0; } - assertThat(translog.uncommittedOperations(), equalTo(uncommittedOps)); + assertThat(translog.stats().getUncommittedOperations(), equalTo(uncommittedOps)); if (frequently()) { markCurrentGenAsCommitted(translog); - assertThat(translog.uncommittedOperations(), equalTo(operationsInLastGen)); + assertThat(translog.stats().getUncommittedOperations(), equalTo(operationsInLastGen)); uncommittedOps = operationsInLastGen; } } @@ -2514,7 +2514,7 @@ public void testRollGeneration() throws Exception { long minGenForRecovery = randomLongBetween(generation, generation + rolls); commit(translog, minGenForRecovery, generation + rolls); assertThat(translog.currentFileGeneration(), equalTo(generation + rolls)); - assertThat(translog.uncommittedOperations(), equalTo(0)); + assertThat(translog.stats().getUncommittedOperations(), equalTo(0)); if (longRetention) { for (int i = 0; i <= rolls; i++) { 
assertFileIsPresent(translog, generation + i);
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
index a496664c0260b..49e557c3dde78 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
@@ -306,7 +306,7 @@ public void testShouldFlushAfterPeerRecovery() throws Exception {
         try (ReplicationGroup shards = createGroup(0)) {
             shards.startAll();
             int numDocs = shards.indexDocs(between(10, 100));
-            final long translogSizeOnPrimary = shards.getPrimary().getTranslog().uncommittedSizeInBytes();
+            final long translogSizeOnPrimary = shards.getPrimary().translogStats().getUncommittedSizeInBytes();
             shards.flush();
 
             final IndexShard replica = shards.addReplica();
From a9677023da73325f1b8be5e0cd7df234b75cd764 Mon Sep 17 00:00:00 2001
From: Jason Tedor 
Date: Thu, 22 Mar 2018 21:23:52 -0400
Subject: [PATCH 16/27] Add note to low-level client docs for DNS caching (#29213)

This commit adds a note to the low-level REST client docs regarding the
possibility of being impacted by the JVM DNS cache policy under a
default security manager policy.
---
 docs/java-rest/low-level/configuration.asciidoc | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/docs/java-rest/low-level/configuration.asciidoc b/docs/java-rest/low-level/configuration.asciidoc
index 54f7cd2817354..b0753496558bb 100644
--- a/docs/java-rest/low-level/configuration.asciidoc
+++ b/docs/java-rest/low-level/configuration.asciidoc
@@ -86,3 +86,16 @@ will be used.
 
 For any other required configuration needed, the Apache HttpAsyncClient docs
 should be consulted: https://hc.apache.org/httpcomponents-asyncclient-4.1.x/ .
+
+NOTE: If your application runs under the security manager you might be subject
+to the JVM default policies of caching positive hostname resolutions
+indefinitely and negative hostname resolutions for ten seconds. If the resolved
+addresses of the hosts to which you are connecting the client vary over time,
+then you might want to modify the default JVM behavior. This can be done by
+adding
+http://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html[`networkaddress.cache.ttl=`]
+and
+http://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html[`networkaddress.cache.negative.ttl=`]
+to your
+http://docs.oracle.com/javase/8/docs/technotes/guides/security/PolicyFiles.html[Java
+security policy].
From 111f0788a2e2580ebc3ecdbb68eda28560a7f5fa Mon Sep 17 00:00:00 2001
From: Jason Tedor 
Date: Thu, 22 Mar 2018 23:06:53 -0400
Subject: [PATCH 17/27] Add error file docs to important settings

This commit adds the error file documentation to the important settings
docs so that the page is actually visible.
---
 docs/reference/setup/important-settings.asciidoc | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/reference/setup/important-settings.asciidoc b/docs/reference/setup/important-settings.asciidoc
index 997f267a7e29f..b9b99b708031e 100644
--- a/docs/reference/setup/important-settings.asciidoc
+++ b/docs/reference/setup/important-settings.asciidoc
@@ -30,3 +30,5 @@ include::important-settings/heap-size.asciidoc[]
 include::important-settings/heap-dump-path.asciidoc[]
 
 include::important-settings/gc-logging.asciidoc[]
+
+include::important-settings/error-file.asciidoc[]
From 8328b9c5cd958aa0e0b9b28d2585a42e42b6a96c Mon Sep 17 00:00:00 2001
From: Milan Chovatiya <31452855+milan15@users.noreply.github.com>
Date: Fri, 23 Mar 2018 02:37:31 -0700
Subject: [PATCH 18/27] REST: Split `RestUpgradeAction` into two actions (#29124)

Closes #29062
---
 .../elasticsearch/action/ActionModule.java    |  2 +
 .../admin/indices/RestUpgradeAction.java      | 31 ---------
 .../indices/RestUpgradeStatusAction.java      | 68 +++++++++++++++++++
 3 files changed, 70 insertions(+), 31 deletions(-)
 create mode 100644 server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java

diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java
index 51abf6b0222e1..60ba0a43396e4 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -275,6 +275,7 @@ import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction;
 import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction;
 import org.elasticsearch.rest.action.admin.indices.RestUpgradeAction;
+import org.elasticsearch.rest.action.admin.indices.RestUpgradeStatusAction;
 import org.elasticsearch.rest.action.admin.indices.RestValidateQueryAction;
 import org.elasticsearch.rest.action.cat.AbstractCatAction;
 import org.elasticsearch.rest.action.cat.RestAliasAction;
@@ -592,6 +593,7 @@ public void initRestHandlers(Supplier nodesInCluster) {
         registerHandler.accept(new RestSyncedFlushAction(settings, restController));
         registerHandler.accept(new RestForceMergeAction(settings, restController));
         registerHandler.accept(new RestUpgradeAction(settings, restController));
+        registerHandler.accept(new RestUpgradeStatusAction(settings, restController));
         registerHandler.accept(new RestClearIndicesCacheAction(settings, restController));
 
         registerHandler.accept(new RestIndexAction(settings, restController));
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java
index 1d32c14655ade..0f8c0ca51a72f 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java
@@ -20,8 +20,6 @@ package org.elasticsearch.rest.action.admin.indices;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest;
-import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
 import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
 import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -40,7 +38,6 @@ import java.io.IOException;
 import java.util.Map;
 
-import static
org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; @@ -50,9 +47,6 @@ public RestUpgradeAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(POST, "/_upgrade", this); controller.registerHandler(POST, "/{index}/_upgrade", this); - - controller.registerHandler(GET, "/_upgrade", this); - controller.registerHandler(GET, "/{index}/_upgrade", this); } @Override @@ -62,30 +56,6 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - if (request.method().equals(RestRequest.Method.GET)) { - return handleGet(request, client); - } else if (request.method().equals(RestRequest.Method.POST)) { - return handlePost(request, client); - } else { - throw new IllegalArgumentException("illegal method [" + request.method() + "] for request [" + request.path() + "]"); - } - } - - private RestChannelConsumer handleGet(final RestRequest request, NodeClient client) { - UpgradeStatusRequest statusRequest = new UpgradeStatusRequest(Strings.splitStringByCommaToArray(request.param("index"))); - statusRequest.indicesOptions(IndicesOptions.fromRequest(request, statusRequest.indicesOptions())); - return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); - } - - private RestChannelConsumer handlePost(final RestRequest request, NodeClient client) { UpgradeRequest upgradeReq = new UpgradeRequest(Strings.splitStringByCommaToArray(request.param("index"))); upgradeReq.indicesOptions(IndicesOptions.fromRequest(request, upgradeReq.indicesOptions())); upgradeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false)); @@ -107,5 +77,4 @@ public RestResponse buildResponse(UpgradeResponse response, XContentBuilder buil } }); } - } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java new file mode 100644 index 0000000000000..a20334c3be188 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.rest.action.admin.indices;
+
+import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest;
+import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.rest.BytesRestResponse;
+import org.elasticsearch.rest.action.RestBuilderListener;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestStatus.OK;
+
+public class RestUpgradeStatusAction extends BaseRestHandler {
+
+    public RestUpgradeStatusAction(Settings settings, RestController controller) {
+        super(settings);
+        controller.registerHandler(GET, "/_upgrade", this);
+        controller.registerHandler(GET, "/{index}/_upgrade", this);
+    }
+
+    @Override
+    public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        UpgradeStatusRequest statusRequest = new UpgradeStatusRequest(Strings.splitStringByCommaToArray(request.param("index")));
+        statusRequest.indicesOptions(IndicesOptions.fromRequest(request, statusRequest.indicesOptions()));
+        return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestBuilderListener(channel) {
+            @Override
+            public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception {
+                builder.startObject();
+                response.toXContent(builder, request);
+                builder.endObject();
+                return new BytesRestResponse(OK, builder);
+            }
+        });
+    }
+
+    @Override
+    public String getName() {
+        return "upgrade_status_action";
+    }
+}
From 4a8099c6962da1e68309e0e382d0e6198a3937c2 Mon Sep 17 00:00:00 2001
From: Yu 
Date: Fri, 23 Mar 2018 10:53:37 +0100
Subject: [PATCH 19/27] Change BroadcastResponse from ToXContentFragment to ToXContentObject (#28878)

While working on #27799, we found that it might make sense to change
BroadcastResponse from ToXContentFragment to ToXContentObject, since it
is a complete XContent object in its own right and the other responses
are typically ToXContentObject. By doing this, we can also move the
XContent build logic of BroadcastResponse's subclasses from the REST
layer to the concrete classes themselves.
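
To illustrate the new pattern with a hypothetical subclass (a minimal
sketch, not code from this change): `BroadcastResponse#toXContent` now
emits the enclosing object and the common `_shards` header itself, and
subclasses only contribute their custom fields through the new
`addCustomXContentFields` hook introduced below:

    // Hypothetical subclass: the enclosing object and the `_shards` section
    // are rendered by BroadcastResponse.toXContent(), which then calls back here.
    public class MyBroadcastResponse extends BroadcastResponse {
        @Override
        protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException {
            builder.field("my_custom_field", "value"); // appears right after `_shards`
        }
    }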
Relates to #3889 --- .../indices/recovery/RecoveryResponse.java | 21 ++------ .../recovery/TransportRecoveryAction.java | 4 +- .../segments/IndicesSegmentResponse.java | 10 ++-- .../indices/stats/IndicesStatsResponse.java | 19 ++------ .../upgrade/get/UpgradeStatusResponse.java | 5 +- .../indices/upgrade/post/UpgradeResponse.java | 13 +++++ .../validate/query/ValidateQueryResponse.java | 36 +++++++++++++- .../support/broadcast/BroadcastResponse.java | 13 ++++- .../indices/RestClearIndicesCacheAction.java | 18 +------ .../action/admin/indices/RestFlushAction.java | 17 +------ .../admin/indices/RestForceMergeAction.java | 17 +------ .../indices/RestIndicesSegmentsAction.java | 20 +------- .../admin/indices/RestIndicesStatsAction.java | 19 +------- .../admin/indices/RestRecoveryAction.java | 20 +------- .../admin/indices/RestRefreshAction.java | 15 ++---- .../admin/indices/RestUpgradeAction.java | 30 ++---------- .../indices/RestUpgradeStatusAction.java | 17 +------ .../indices/RestValidateQueryAction.java | 48 ++----------------- .../stats/IndicesStatsResponseTests.java | 4 +- .../action/cat/RestRecoveryActionTests.java | 2 - 20 files changed, 105 insertions(+), 243 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java index 1a9c86049f8c6..7c51edc4d957e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.indices.recovery.RecoveryState; @@ -37,9 +36,8 @@ /** * Information regarding the recovery state of indices and their associated shards. 
*/ -public class RecoveryResponse extends BroadcastResponse implements ToXContentFragment { +public class RecoveryResponse extends BroadcastResponse { - private boolean detailed = false; private Map> shardRecoveryStates = new HashMap<>(); public RecoveryResponse() { } @@ -51,36 +49,26 @@ public RecoveryResponse() { } * @param totalShards Total count of shards seen * @param successfulShards Count of shards successfully processed * @param failedShards Count of shards which failed to process - * @param detailed Display detailed metrics * @param shardRecoveryStates Map of indices to shard recovery information * @param shardFailures List of failures processing shards */ - public RecoveryResponse(int totalShards, int successfulShards, int failedShards, boolean detailed, - Map> shardRecoveryStates, + public RecoveryResponse(int totalShards, int successfulShards, int failedShards, Map> shardRecoveryStates, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.shardRecoveryStates = shardRecoveryStates; - this.detailed = detailed; } public boolean hasRecoveries() { return shardRecoveryStates.size() > 0; } - public boolean detailed() { - return detailed; - } - - public void detailed(boolean detailed) { - this.detailed = detailed; - } - public Map> shardRecoveryStates() { return shardRecoveryStates; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); if (hasRecoveries()) { for (String index : shardRecoveryStates.keySet()) { List recoveryStates = shardRecoveryStates.get(index); @@ -98,6 +86,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); } } + builder.endObject(); return builder; } @@ -133,4 +122,4 @@ public void readFrom(StreamInput in) throws IOException { public String toString() { return Strings.toString(this, true, true); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index 0e11aed9d24fd..c67f5040cdd66 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -87,7 +87,7 @@ protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, shardResponses.get(indexName).add(recoveryState); } } - return new RecoveryResponse(totalShards, successfulShards, failedShards, request.detailed(), shardResponses, shardFailures); + return new RecoveryResponse(totalShards, successfulShards, failedShards, shardResponses, shardFailures); } @Override @@ -118,4 +118,4 @@ protected ClusterBlockException checkGlobalBlock(ClusterState state, RecoveryReq protected ClusterBlockException checkRequestBlock(ClusterState state, RecoveryRequest request, String[] concreteIndices) { return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index e8e2f5376cd24..7faf24329dadd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.engine.Segment; @@ -43,7 +42,7 @@ import java.util.Map; import java.util.Set; -public class IndicesSegmentResponse extends BroadcastResponse implements ToXContentFragment { +public class IndicesSegmentResponse extends BroadcastResponse { private ShardSegments[] shards; @@ -103,7 +102,7 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.INDICES); for (IndexSegments indexSegments : getIndices().values()) { @@ -173,10 +172,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); - return builder; } - static void toXContent(XContentBuilder builder, Sort sort) throws IOException { + private static void toXContent(XContentBuilder builder, Sort sort) throws IOException { builder.startArray("sort"); for (SortField field : sort.getSort()) { builder.startObject(); @@ -195,7 +193,7 @@ static void toXContent(XContentBuilder builder, Sort sort) throws IOException { builder.endArray(); } - static void toXContent(XContentBuilder builder, Accountable tree) throws IOException { + private static void toXContent(XContentBuilder builder, Accountable tree) throws IOException { builder.startObject(); builder.field(Fields.DESCRIPTION, tree.toString()); builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(tree.ramBytesUsed())); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index 46aef007e6bab..7406dc4f2d12c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -25,9 +25,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; import java.util.ArrayList; @@ -39,7 +37,7 @@ import static java.util.Collections.unmodifiableMap; -public class IndicesStatsResponse extends BroadcastResponse implements ToXContentFragment { +public class IndicesStatsResponse extends BroadcastResponse { private ShardStats[] shards; @@ -147,7 +145,7 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { final String level = params.param("level", "indices"); final boolean isLevelValid = "cluster".equalsIgnoreCase(level) || "indices".equalsIgnoreCase(level) || 
"shards".equalsIgnoreCase(level); @@ -155,7 +153,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws throw new IllegalArgumentException("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]"); } - builder.startObject("_all"); builder.startObject("primaries"); @@ -198,8 +195,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); } - - return builder; } static final class Fields { @@ -209,14 +204,6 @@ static final class Fields { @Override public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return Strings.toString(builder); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; - } + return Strings.toString(this, true, false); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java index 71110f18b875c..a45b8feda89ce 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -34,7 +33,7 @@ import java.util.Map; import java.util.Set; -public class UpgradeStatusResponse extends BroadcastResponse implements ToXContentFragment { +public class UpgradeStatusResponse extends BroadcastResponse { private ShardUpgradeStatus[] shards; private Map indicesUpgradeStatus; @@ -116,6 +115,7 @@ public long getToUpgradeBytesAncient() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes()); builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes()); builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient()); @@ -161,6 +161,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); } + builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java index db49921d43532..4a760e273a0fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.HashMap; @@ -74,6 +75,18 @@ public void writeTo(StreamOutput out) throws IOException { } } + @Override + protected void 
addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + builder.startObject("upgraded_indices"); + for (Map.Entry> entry : versions.entrySet()) { + builder.startObject(entry.getKey()); + builder.field("upgrade_version", entry.getValue().v1()); + builder.field("oldest_lucene_segment_version", entry.getValue().v2()); + builder.endObject(); + } + builder.endObject(); + } + /** * Returns the highest upgrade version of the node that performed metadata upgrade and the * the version of the oldest lucene segment for each index that was upgraded. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java index eff37ff4b0cb4..5bb11dd56e00b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; @@ -38,8 +39,15 @@ */ public class ValidateQueryResponse extends BroadcastResponse { + public static final String INDEX_FIELD = "index"; + public static final String SHARD_FIELD = "shard"; + public static final String VALID_FIELD = "valid"; + public static final String EXPLANATIONS_FIELD = "explanations"; + public static final String ERROR_FIELD = "error"; + public static final String EXPLANATION_FIELD = "explanation"; + private boolean valid; - + private List queryExplanations; ValidateQueryResponse() { @@ -96,4 +104,30 @@ public void writeTo(StreamOutput out) throws IOException { } } + + @Override + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + builder.field(VALID_FIELD, isValid()); + if (getQueryExplanation() != null && !getQueryExplanation().isEmpty()) { + builder.startArray(EXPLANATIONS_FIELD); + for (QueryExplanation explanation : getQueryExplanation()) { + builder.startObject(); + if (explanation.getIndex() != null) { + builder.field(INDEX_FIELD, explanation.getIndex()); + } + if(explanation.getShard() >= 0) { + builder.field(SHARD_FIELD, explanation.getShard()); + } + builder.field(VALID_FIELD, explanation.isValid()); + if (explanation.getError() != null) { + builder.field(ERROR_FIELD, explanation.getError()); + } + if (explanation.getExplanation() != null) { + builder.field(EXPLANATION_FIELD, explanation.getExplanation()); + } + builder.endObject(); + } + builder.endArray(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java index ce812644faea6..47bc50be330b6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java @@ -25,7 +25,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentFragment; +import 
org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestActions; @@ -40,7 +40,7 @@ /** * Base class for all broadcast operation based responses. */ -public class BroadcastResponse extends ActionResponse implements ToXContentFragment { +public class BroadcastResponse extends ActionResponse implements ToXContentObject { public static final DefaultShardOperationFailedException[] EMPTY = new DefaultShardOperationFailedException[0]; @@ -149,7 +149,16 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); RestActions.buildBroadcastShardsHeader(builder, params, this); + addCustomXContentFields(builder, params); + builder.endObject(); return builder; } + + /** + * Override in subclass to add custom fields following the common `_shards` field + */ + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java index a63676c1e09ed..266c1cb68f03f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java @@ -20,24 +20,19 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; public class RestClearIndicesCacheAction extends BaseRestHandler { @@ -61,16 +56,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC Strings.splitStringByCommaToArray(request.param("index"))); clearIndicesCacheRequest.indicesOptions(IndicesOptions.fromRequest(request, clearIndicesCacheRequest.indicesOptions())); fromRequest(request, clearIndicesCacheRequest); - return channel -> - client.admin().indices().clearCache(clearIndicesCacheRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ClearIndicesCacheResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().clearCache(clearIndicesCacheRequest, new 
RestToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java index 8eb318e660c60..4879a54f4feae 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java @@ -20,24 +20,19 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; public class RestFlushAction extends BaseRestHandler { public RestFlushAction(Settings settings, RestController controller) { @@ -60,14 +55,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions())); flushRequest.force(request.paramAsBoolean("force", flushRequest.force())); flushRequest.waitIfOngoing(request.paramAsBoolean("wait_if_ongoing", flushRequest.waitIfOngoing())); - return channel -> client.admin().indices().flush(flushRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(FlushResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().flush(flushRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java index 394c4822f0ea8..dcc397be14263 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java @@ -20,23 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; 
-import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; public class RestForceMergeAction extends BaseRestHandler { public RestForceMergeAction(Settings settings, RestController controller) { @@ -57,14 +52,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments())); mergeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", mergeRequest.onlyExpungeDeletes())); mergeRequest.flush(request.paramAsBoolean("flush", mergeRequest.flush())); - return channel -> client.admin().indices().forceMerge(mergeRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ForceMergeResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().forceMerge(mergeRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java index a57a404baf2ef..1beec61e6dd37 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java @@ -19,25 +19,19 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestIndicesSegmentsAction extends BaseRestHandler { public RestIndicesSegmentsAction(Settings settings, RestController controller) { @@ -57,16 +51,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC Strings.splitStringByCommaToArray(request.param("index"))); indicesSegmentsRequest.verbose(request.paramAsBoolean("verbose", false)); indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions())); - return channel -> - client.admin().indices().segments(indicesSegmentsRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(IndicesSegmentResponse response, XContentBuilder 
builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().segments(indicesSegmentsRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java index ca554301b937d..1dbbd6f1696db 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java @@ -20,18 +20,14 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.Collections; @@ -43,8 +39,6 @@ import java.util.function.Consumer; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestIndicesStatsAction extends BaseRestHandler { public RestIndicesStatsAction(Settings settings, RestController controller) { @@ -141,16 +135,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC indicesStatsRequest.includeSegmentFileSizes(request.paramAsBoolean("include_segment_file_sizes", false)); } - return channel -> client.admin().indices().stats(indicesStatsRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(IndicesStatsResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().stats(indicesStatsRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java index 4516ebeeb565d..b445cb3a6764a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java @@ -20,23 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; -import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.support.IndicesOptions; import 
org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; /** * REST handler to report on index recoveries. @@ -60,18 +55,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC recoveryRequest.detailed(request.paramAsBoolean("detailed", false)); recoveryRequest.activeOnly(request.paramAsBoolean("active_only", false)); recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions())); - - return channel -> client.admin().indices().recoveries(recoveryRequest, new RestBuilderListener<RecoveryResponse>(channel) { - @Override - public RestResponse buildResponse(RecoveryResponse response, XContentBuilder builder) throws Exception { - response.detailed(recoveryRequest.detailed()); - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); - + return channel -> client.admin().indices().recoveries(recoveryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java index 486d8664a49d2..1f0f81e0285ce 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java @@ -25,13 +25,11 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -57,13 +55,10 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { RefreshRequest refreshRequest = new RefreshRequest(Strings.splitStringByCommaToArray(request.param("index"))); refreshRequest.indicesOptions(IndicesOptions.fromRequest(request, refreshRequest.indicesOptions())); - return channel -> client.admin().indices().refresh(refreshRequest, new RestBuilderListener<RefreshResponse>(channel) { + return channel -> client.admin().indices().refresh(refreshRequest, new RestToXContentListener<RefreshResponse>(channel) { @Override - public RestResponse buildResponse(RefreshResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(response.getStatus(), builder); + protected
RestStatus getStatus(RefreshResponse response) { + return response.getStatus(); } }); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java index 0f8c0ca51a72f..9201c4504823d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java @@ -19,28 +19,20 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; -import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; -import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestUpgradeAction extends BaseRestHandler { public RestUpgradeAction(Settings settings, RestController controller) { @@ -59,22 +51,6 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client UpgradeRequest upgradeReq = new UpgradeRequest(Strings.splitStringByCommaToArray(request.param("index"))); upgradeReq.indicesOptions(IndicesOptions.fromRequest(request, upgradeReq.indicesOptions())); upgradeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false)); - return channel -> client.admin().indices().upgrade(upgradeReq, new RestBuilderListener<UpgradeResponse>(channel) { - @Override - public RestResponse buildResponse(UpgradeResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - builder.startObject("upgraded_indices"); - for (Map.Entry<String, Tuple<Version, org.apache.lucene.util.Version>> entry : response.versions().entrySet()) { - builder.startObject(entry.getKey()); - builder.field("upgrade_version", entry.getValue().v1()); - builder.field("oldest_lucene_segment_version", entry.getValue().v2()); - builder.endObject(); - } - builder.endObject(); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().upgrade(upgradeReq, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java index a20334c3be188..1b21e125cdc47 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java @@ -20,23
+20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; -import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; public class RestUpgradeStatusAction extends BaseRestHandler { @@ -50,15 +45,7 @@ public RestUpgradeStatusAction(Settings settings, RestController controller) { public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { UpgradeStatusRequest statusRequest = new UpgradeStatusRequest(Strings.splitStringByCommaToArray(request.param("index"))); statusRequest.indicesOptions(IndicesOptions.fromRequest(request, statusRequest.indicesOptions())); - return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestBuilderListener<UpgradeStatusResponse>(channel) { - @Override - public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java index df1c14c480650..57486396f911b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.validate.query.QueryExplanation; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -33,16 +32,14 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestActions; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestValidateQueryAction extends BaseRestHandler { public RestValidateQueryAction(Settings settings,
RestController controller) { @@ -91,37 +88,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC handleException(validateQueryRequest, finalBodyParsingException.getMessage(), channel); } } else { - client.admin().indices().validateQuery(validateQueryRequest, new RestBuilderListener<ValidateQueryResponse>(channel) { - @Override - public RestResponse buildResponse(ValidateQueryResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - builder.field(VALID_FIELD, response.isValid()); - buildBroadcastShardsHeader(builder, request, response); - if (response.getQueryExplanation() != null && !response.getQueryExplanation().isEmpty()) { - builder.startArray(EXPLANATIONS_FIELD); - for (QueryExplanation explanation : response.getQueryExplanation()) { - builder.startObject(); - if (explanation.getIndex() != null) { - builder.field(INDEX_FIELD, explanation.getIndex()); - } - if(explanation.getShard() >= 0) { - builder.field(SHARD_FIELD, explanation.getShard()); - } - builder.field(VALID_FIELD, explanation.isValid()); - if (explanation.getError() != null) { - builder.field(ERROR_FIELD, explanation.getError()); - } - if (explanation.getExplanation() != null) { - builder.field(EXPLANATION_FIELD, explanation.getExplanation()); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + client.admin().indices().validateQuery(validateQueryRequest, new RestToXContentListener<>(channel)); } }; } @@ -132,18 +99,11 @@ private void handleException(final ValidateQueryRequest request, final String me private static BytesRestResponse buildErrorResponse(XContentBuilder builder, String error, boolean explain) throws IOException { builder.startObject(); - builder.field(VALID_FIELD, false); + builder.field(ValidateQueryResponse.VALID_FIELD, false); if (explain) { - builder.field(ERROR_FIELD, error); + builder.field(ValidateQueryResponse.ERROR_FIELD, error); } builder.endObject(); return new BytesRestResponse(OK, builder); } - - private static final String INDEX_FIELD = "index"; - private static final String SHARD_FIELD = "shard"; - private static final String VALID_FIELD = "valid"; - private static final String EXPLANATIONS_FIELD = "explanations"; - private static final String ERROR_FIELD = "error"; - private static final String EXPLANATION_FIELD = "explanation"; } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java index 0f24a520b84b7..a7e3ee57a08c3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.stats; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; import java.util.Collections; @@ -34,7 +35,8 @@ public void testInvalidLevel() { final IndicesStatsResponse response = new IndicesStatsResponse(); final String level = randomAlphaOfLength(16); final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> response.toXContent(null, params)); + final IllegalArgumentException e =
expectThrows(IllegalArgumentException.class, + () -> response.toXContent(JsonXContent.contentBuilder(), params)); assertThat( e, hasToString(containsString("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]"))); diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java index ffebd804c609c..e99fb4cc1f258 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java @@ -57,7 +57,6 @@ public void testRestRecoveryAction() { final int totalShards = randomIntBetween(1, 32); final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2)); final int failedShards = totalShards - successfulShards; - final boolean detailed = randomBoolean(); final Map<String, List<RecoveryState>> shardRecoveryStates = new HashMap<>(); final List<RecoveryState> recoveryStates = new ArrayList<>(); @@ -115,7 +114,6 @@ public void testRestRecoveryAction() { totalShards, successfulShards, failedShards, - detailed, shardRecoveryStates, shardFailures); final Table table = action.buildRecoveryTable(null, response); From 794de63232fb48c8ea29cb662eea153a934301ee Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 23 Mar 2018 07:35:50 -0400 Subject: [PATCH 20/27] Remove type casts in logging in server component (#28807) This commit removes type casts in logging in the server component (other components will be done later). This also adds a parameterized message test which would catch breaking changes related to lambdas in Log4j. ---
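For illustration, a minimal self-contained sketch of the before/after shape of this change; the class and method names are placeholders rather than code from a specific file, and the logging call mirrors the first hunk below. Log4j's org.apache.logging.log4j.Logger declares overloads such as error(Supplier<?>, Throwable), so a lambda that builds a ParameterizedMessage binds to the Supplier overload on its own and the explicit cast is redundant:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    class SupplierCastSketch { // placeholder class, for illustration only
        private static final Logger logger = LogManager.getLogger(SupplierCastSketch.class);

        void before(String source, Exception e) {
            // old pattern: the lambda is funneled through an explicit cast to Log4j's Supplier
            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
        }

        void after(String source, Exception e) {
            // new pattern: the lambda binds to error(Supplier<?>, Throwable) directly;
            // in both cases the message is only built if the error level is enabled
            logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
        }
    }

Either form evaluates the message lazily; dropping the cast only removes noise, which is why the diffs below delete the Supplier import alongside each cast.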
.../health/TransportClusterHealthAction.java | 5 +- .../TransportClusterRerouteAction.java | 3 +- .../TransportClusterUpdateSettingsAction.java | 5 +- .../close/TransportCloseIndexAction.java | 3 +- .../delete/TransportDeleteIndexAction.java | 3 +- .../put/TransportPutMappingAction.java | 5 +- .../open/TransportOpenIndexAction.java | 3 +- .../put/TransportUpdateSettingsAction.java | 3 +- .../TransportDeleteIndexTemplateAction.java | 3 +- .../put/TransportPutIndexTemplateAction.java | 3 +- .../post/TransportUpgradeSettingsAction.java | 3 +- .../action/bulk/BulkRequestHandler.java | 5 +- .../action/bulk/TransportBulkAction.java | 3 +- .../action/bulk/TransportShardBulkAction.java | 5 +- .../get/TransportShardMultiGetAction.java | 3 +- .../search/AbstractSearchAsyncAction.java | 16 +-- .../action/search/ClearScrollController.java | 3 +- .../action/search/DfsQueryPhase.java | 7 +- .../action/search/FetchSearchPhase.java | 6 +- .../action/search/InitialSearchPhase.java | 33 ++---- .../support/HandledTransportAction.java | 9 +- .../support/ThreadedActionListener.java | 4 +- .../broadcast/TransportBroadcastAction.java | 18 +-- .../node/TransportBroadcastByNodeAction.java | 22 +--- .../master/TransportMasterNodeAction.java | 4 +- .../support/nodes/TransportNodesAction.java | 4 +- .../replication/ReplicationOperation.java | 2 +- .../TransportReplicationAction.java | 32 ++---- .../shard/TransportSingleShardAction.java | 12 +- .../support/tasks/TransportTasksAction.java | 4 +- .../TransportShardMultiTermsVectorAction.java | 3 +- .../bootstrap/BootstrapChecks.java | 9 +- ...ElasticsearchUncaughtExceptionHandler.java | 7 +- .../TransportClientNodesService.java | 18 +-- .../cluster/NodeConnectionsService.java | 9 +- .../action/shard/ShardStateAction.java | 17 ++- .../metadata/MetaDataCreateIndexService.java | 5 +- .../metadata/MetaDataIndexUpgradeService.java | 3 +- .../metadata/MetaDataMappingService.java | 4 +- .../cluster/routing/RoutingService.java | 7 +- .../service/ClusterApplierService.java | 7 +- .../cluster/service/MasterService.java | 37 ++---- .../elasticsearch/common/lucene/Lucene.java | 3 +- .../settings/AbstractScopedSettings.java | 6 +- .../common/util/IndexFolderUpgrader.java | 3 +- .../util/concurrent/LoggingRunnable.java | 3 +- ...AckClusterStatePublishResponseHandler.java | 3 +- .../discovery/single/SingleNodeDiscovery.java | 6 +- .../discovery/zen/MasterFaultDetection.java | 9 +- .../discovery/zen/NodeJoinController.java | 5 +- .../discovery/zen/NodesFaultDetection.java | 18 +-- .../zen/PublishClusterStateAction.java | 19 +--- .../discovery/zen/UnicastZenPing.java | 16 +-- .../discovery/zen/ZenDiscovery.java | 38 +++---- .../elasticsearch/env/NodeEnvironment.java | 7 +- .../gateway/AsyncShardFetch.java | 3 +- .../org/elasticsearch/gateway/Gateway.java | 13 +-- .../elasticsearch/gateway/GatewayService.java | 3 +- .../gateway/LocalAllocateDangledIndices.java | 5 +- .../gateway/MetaDataStateFormat.java | 4 +- .../gateway/MetaStateService.java | 3 +- .../gateway/PrimaryShardAllocator.java | 5 +- ...ransportNodesListGatewayStartedShards.java | 4 +- .../index/CompositeIndexEventListener.java | 21 ++-- .../org/elasticsearch/index/IndexService.java | 3 +- .../org/elasticsearch/index/IndexWarmer.java | 5 +- .../index/cache/bitset/BitsetFilterCache.java | 3 +- .../elasticsearch/index/engine/Engine.java | 29 ++--- .../index/engine/InternalEngine.java | 3 +- .../index/mapper/MapperService.java | 2 +- .../reindex/ClientScrollableHitSource.java | 8 +- .../elasticsearch/index/shard/IndexShard.java | 3 +- .../shard/IndexingOperationListener.java | 13 +-- .../index/shard/SearchOperationListener.java | 21 ++-- .../org/elasticsearch/index/store/Store.java | 13 +-- .../index/translog/Translog.java | 3 +- .../indices/IndexingMemoryController.java | 5 +- .../indices/analysis/HunspellService.java | 6 +- .../cluster/IndicesClusterStateService.java | 13 +-- .../indices/flush/SyncedFlushService.java | 7 +- .../recovery/PeerRecoveryTargetService.java | 26 ++--- .../recovery/RecoveriesCollection.java | 3 +- .../recovery/RecoverySourceHandler.java | 15 +-- .../indices/recovery/RecoveryTarget.java | 4 +- .../indices/store/IndicesStore.java | 11 +- .../org/elasticsearch/monitor/fs/FsProbe.java | 4 +- .../persistent/AllocatedPersistentTask.java | 9 +- .../PersistentTasksNodeService.java | 7 +- .../repositories/RepositoriesService.java | 13 +-- .../VerifyNodeRepositoryAction.java | 5 +- .../blobstore/BlobStoreRepository.java | 35 +++--- .../snapshots/RestoreService.java | 7 +- .../snapshots/SnapshotShardsService.java | 6 +- .../snapshots/SnapshotsService.java | 26 ++--- .../tasks/LoggingTaskListener.java | 3 +- .../org/elasticsearch/tasks/TaskManager.java | 11 +- .../tasks/TaskResultsService.java | 3 +- .../elasticsearch/threadpool/ThreadPool.java | 7 +- .../transport/RemoteClusterConnection.java | 18 +-- .../elasticsearch/transport/TcpTransport.java | 40 +++---- .../TransportChannelResponseHandler.java | 8 +- .../transport/TransportService.java | 4 +- .../cluster/service/ClusterServiceIT.java | 9 +- .../cluster/service/TaskBatcherTests.java | 3 +- .../common/logging/LoggersTests.java | 106 ++++++++++++++++++ .../discovery/ClusterDisruptionIT.java | 9 +- .../discovery/MasterDisruptionIT.java | 3 +- .../zen/NodeJoinControllerTests.java | 3 +- .../index/translog/TranslogTests.java | 5 +-
...ClusterStateServiceRandomUpdatesTests.java | 5 +- .../search/aggregations/metrics/StatsIT.java | 3 +- .../elasticsearch/search/geo/GeoFilterIT.java | 3 +- 112 files changed, 450 insertions(+), 655 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 541738d6be7cc..697849985afeb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.health; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; @@ -104,7 +103,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); listener.onFailure(e); } }); @@ -132,7 +131,7 @@ public void onNoLongerMaster(String source) { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); listener.onFailure(e); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 6e4d628ea5fc3..108ce586573d7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -112,7 +111,7 @@ public void onAckTimeout() { @Override public void onFailure(String source, Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to perform [{}]", source), e); + logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e); super.onFailure(source, e); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index edc30bd3c35fd..4cf74fbf865cc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -20,7 +20,6 @@ package 
org.elasticsearch.action.admin.cluster.settings; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -160,7 +159,7 @@ public void onNoLongerMaster(String source) { @Override public void onFailure(String source, Exception e) { //if the reroute fails we only log - logger.debug((Supplier) () -> new ParameterizedMessage("failed to perform [{}]", source), e); + logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e); listener.onFailure(new ElasticsearchException("reroute after update settings failed", e)); } @@ -174,7 +173,7 @@ public ClusterState execute(final ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to perform [{}]", source), e); + logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e); super.onFailure(source, e); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 362f54b74ab36..0bd6370e88a57 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.close; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; @@ -114,7 +113,7 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t); + logger.debug(() -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t); listener.onFailure(t); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index f5c63bd470d40..a2e102e0689c5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.delete; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; @@ -102,7 +101,7 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t); + logger.debug(() -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t); listener.onFailure(t); } }); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index e10a20096fa30..38cd5efe13ac0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.mapping.put; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -93,12 +92,12 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t); + logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t); listener.onFailure(t); } }); } catch (IndexNotFoundException ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex); + logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex); throw ex; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index 795e11c228839..1e89244b67644 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.open; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; @@ -99,7 +98,7 @@ public void onResponse(OpenIndexClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t); + logger.debug(() -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t); listener.onFailure(t); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index d20957c4bd29b..83eca83310339 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.settings.put; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -94,7 +93,7 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t); + logger.debug(() -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t); listener.onFailure(t); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index ad9f73b55b0cb..db5ddd326d736 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.template.delete; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -75,7 +74,7 @@ public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) { @Override public void onFailure(Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e); + logger.debug(() -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e); listener.onFailure(e); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 1624c7950e7f2..7b46dc602d0ce 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.template.put; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -97,7 +96,7 @@ public void onResponse(MetaDataIndexTemplateService.PutResponse response) { @Override public void onFailure(Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to put template [{}]", request.name()), e); + logger.debug(() -> new ParameterizedMessage("failed to put template [{}]", request.name()), e); listener.onFailure(e); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index 02d58a9db7ece..2e428e85efc23 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.upgrade.post; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -78,7 +77,7 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t); + logger.debug(() -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t); listener.onFailure(t); } }); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index 423648bbb7105..adb1d32161fe1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -89,10 +88,10 @@ public void onFailure(Exception e) { } } catch (InterruptedException e) { Thread.currentThread().interrupt(); - logger.info((Supplier) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e); + logger.info(() -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e); listener.afterBulk(executionId, bulkRequest, e); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e); + logger.warn(() -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e); listener.afterBulk(executionId, bulkRequest, e); } finally { if (bulkRequestSetupSuccessful == false) { // if we fail on client.bulk() release the semaphore diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index f756c629b9832..5a3544377155c 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.bulk; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.SparseFixedBitSet; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; @@ -494,7 +493,7 @@ void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListen long ingestStartTimeInNanos = System.nanoTime(); BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); ingestService.getPipelineExecutionService().executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to execute pipeline [{}] 
for document [{}/{}/{}]", + logger.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception); bulkRequestModifier.markCurrentItemAsFailed(exception); }, (exception) -> { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index e66df2b0d9267..7221118d2ef50 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; @@ -197,10 +196,10 @@ static BulkItemResponse createPrimaryResponse(BulkItemResultHolder bulkItemResul DocWriteRequest docWriteRequest = replicaRequest.request(); Exception failure = operationResult.getFailure(); if (isConflictException(failure)) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + logger.trace(() -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + logger.debug(() -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); } diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index 8353c5dc389d9..d15b7b92d62aa 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.get; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportActions; @@ -95,7 +94,7 @@ protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, Sha if (TransportActions.isShardNotAvailableException(e)) { throw (ElasticsearchException) e; } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, + logger.debug(() -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e); response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e)); } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index d6d7cea7704fc..aad2638bd9de3 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -21,7 +21,6 @@ import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -125,10 +124,7 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); Throwable cause = shardSearchFailures.length == 0 ? null : ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), - cause); - } + logger.debug(() -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause); onPhaseFailure(currentPhase, "all shards failed", cause); } else { Boolean allowPartialResults = request.allowPartialSearchResults(); @@ -138,9 +134,8 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); Throwable cause = shardSearchFailures.length == 0 ? null : ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - logger.debug((Supplier) () -> new ParameterizedMessage("{} shards failed for phase: [{}]", - shardSearchFailures.length, getName()), - cause); + logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]", + shardSearchFailures.length, getName()), cause); } onPhaseFailure(currentPhase, "Partial shards failure", null); } else { @@ -160,10 +155,7 @@ private void executePhase(SearchPhase phase) { phase.run(); } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), - e); + logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); } onPhaseFailure(phase, "", e); } diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java index ac708d9b6b0c7..9b98691dc9005 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -133,7 +132,7 @@ private void onFreedContext(boolean freed) { } private void onFailedFreedContext(Throwable e, DiscoveryNode node) { - logger.warn((Supplier) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e); + logger.warn(() -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e); if (expectedOps.countDown()) { listener.onResponse(new ClearScrollResponse(false, freedSearchContexts.get())); } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index db0425db7c320..1d8d702520e4c 100644 --- 
a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -87,10 +86,8 @@ protected void innerOnResponse(QuerySearchResult response) { @Override public void onFailure(Exception exception) { try { - if (context.getLogger().isDebugEnabled()) { - context.getLogger().debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute query phase", - querySearchRequest.id()), exception); - } + context.getLogger().debug(() -> new ParameterizedMessage("[{}] Failed to execute query phase", + querySearchRequest.id()), exception); counter.onFailure(shardIndex, searchShardTarget, exception); } finally { // the query might not have been executed at all (for example because thread pool rejected diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 4712496bc37ec..920353abcf808 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.IntArrayList; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; @@ -169,10 +168,7 @@ public void innerOnResponse(FetchSearchResult result) { @Override public void onFailure(Exception e) { try { - if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", - fetchSearchRequest.id()), e); - } + logger.debug(() -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e); counter.onFailure(shardIndex, shardTarget, e); } finally { // the search context might not be cleared on the node where the fetch was executed for example diff --git a/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java index 01f31d4c7439f..559c7ca102e6b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.routing.GroupShardsIterator; @@ -93,15 +92,10 @@ private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, if (totalOps.incrementAndGet() == expectedTotalOps) { if (logger.isDebugEnabled()) { if (e != null && !TransportActions.isShardNotAvailableException(e)) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "{}: Failed to execute [{}]", - shard != null ? 
shard.shortSummary() : - shardIt.shardId(), - request), - e); + logger.debug(new ParameterizedMessage( + "{}: Failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e); } else if (logger.isTraceEnabled()) { - logger.trace((Supplier) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e); + logger.trace(new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e); } } onPhaseDone(); @@ -109,13 +103,9 @@ private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, final ShardRouting nextShard = shardIt.nextOrNull(); final boolean lastShard = nextShard == null; // trace log this exception - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "{}: Failed to execute [{}] lastShard [{}]", - shard != null ? shard.shortSummary() : shardIt.shardId(), - request, - lastShard), - e); + logger.trace(() -> new ParameterizedMessage( + "{}: Failed to execute [{}] lastShard [{}]", + shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard), e); if (!lastShard) { performPhaseOnShard(shardIndex, shardIt, nextShard); } else { @@ -123,14 +113,9 @@ private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, // no more shards active, add a failure if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception if (e != null && !TransportActions.isShardNotAvailableException(e)) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "{}: Failed to execute [{}] lastShard [{}]", - shard != null ? shard.shortSummary() : - shardIt.shardId(), - request, - lastShard), - e); + logger.debug(new ParameterizedMessage( + "{}: Failed to execute [{}] lastShard [{}]", + shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard), e); } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index 10719fcb91c6a..c584db106992c 100644 --- a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -91,13 +91,8 @@ public void onFailure(Exception e) { try { channel.sendResponse(e); } catch (Exception e1) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "Failed to send error response for action [{}] and request [{}]", - actionName, - request), - e1); + logger.warn(() -> new ParameterizedMessage( + "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); } } }); diff --git a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java index 759693e550e1e..dfcf6445abf7d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; @@ -120,8 +119,7 @@ protected void doRun() throws Exception { @Override public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new 
ParameterizedMessage("failed to execute failure callback on [{}]", listener), e); + logger.warn(() -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e); } }); } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 53764f4ee88d6..0961ab74c4703 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -222,13 +222,8 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int if (e != null) { if (logger.isTraceEnabled()) { if (!TransportActions.isShardNotAvailableException(e)) { - logger.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "{}: failed to execute [{}]", - shard != null ? shard.shortSummary() : shardIt.shardId(), - request), - e); + logger.trace(new ParameterizedMessage( + "{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e); } } } @@ -237,13 +232,8 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int if (logger.isDebugEnabled()) { if (e != null) { if (!TransportActions.isShardNotAvailableException(e)) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "{}: failed to execute [{}]", - shard != null ? shard.shortSummary() : shardIt.shardId(), - request), - e); + logger.debug(new ParameterizedMessage( + "{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e); } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index b6eaa5163c865..ff4e73acc1877 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -362,9 +362,7 @@ protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse re protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) { String nodeId = node.getId(); if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t); + logger.debug(new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t); } // this is defensive to protect against the possibility of double invocation @@ -441,23 +439,13 @@ private void onShardOperation(final NodeRequest request, final Object[] shardRes shardResults[shardIndex] = failure; if (TransportActions.isShardNotAvailableException(e)) { if (logger.isTraceEnabled()) { - logger.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "[{}] failed to execute operation for shard [{}]", - actionName, - shardRouting.shortSummary()), - e); + logger.trace(new ParameterizedMessage( + "[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e); } } else { if (logger.isDebugEnabled()) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> 
new ParameterizedMessage( - "[{}] failed to execute operation for shard [{}]", - actionName, - shardRouting.shortSummary()), - e); + logger.debug(new ParameterizedMessage( + "[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e); } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index feb47aa34fd86..f4a26e723dc0a 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -171,7 +171,7 @@ public void onResponse(Response response) { public void onFailure(Exception t) { if (t instanceof Discovery.FailedToCommitClusterStateException || (t instanceof NotMasterException)) { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t); + logger.debug(() -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t); retry(t, masterChangePredicate); } else { listener.onFailure(t); @@ -226,7 +226,7 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure); + logger.debug(() -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure); listener.onFailure(new MasterNotDiscoveredException(failure)); } }, statePredicate diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 4583e47bc1db7..0b61c7ed71247 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -232,9 +232,7 @@ private void onOperation(int idx, NodeResponse nodeResponse) { private void onFailure(int idx, String nodeId, Throwable t) { if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); + logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); } responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); if (counter.incrementAndGet() == responses.length()) { diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index c29ca5c1d0853..340496ca35363 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -178,7 +178,7 @@ public void onResponse(ReplicaResponse response) { @Override public void onFailure(Exception replicaException) { - logger.trace((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + 
+                logger.trace(() -> new ParameterizedMessage(
                     "[{}] failure while performing [{}] on replica {}, request [{}]",
                     shard.shardId(), opType, shard, replicaRequest), replicaException);
                 // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report.
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 2cd5f7a5f13ac..aca8ed4973263 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -265,9 +265,7 @@ public void onFailure(Exception e) {
                     channel.sendResponse(e);
                 } catch (Exception inner) {
                     inner.addSuppressed(e);
-                    logger.warn(
-                        (org.apache.logging.log4j.util.Supplier<?>)
-                        () -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
+                    logger.warn(() -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
                 }
             }
         });
@@ -579,7 +577,6 @@ public void onResponse(Releasable releasable) {
             public void onFailure(Exception e) {
                 if (e instanceof RetryOnReplicaException) {
                     logger.trace(
-                        (org.apache.logging.log4j.util.Supplier<?>)
                         () -> new ParameterizedMessage(
                             "Retrying operation on replica, action [{}], request [{}]",
                             transportReplicaAction,
@@ -621,12 +618,8 @@ protected void responseWithFailure(Exception e) {
                 channel.sendResponse(e);
             } catch (IOException responseException) {
                 responseException.addSuppressed(e);
-                logger.warn(
-                    (org.apache.logging.log4j.util.Supplier<?>)
-                    () -> new ParameterizedMessage(
-                        "failed to send error message back to client for action [{}]",
-                        transportReplicaAction),
-                    responseException);
+                logger.warn(() -> new ParameterizedMessage(
+                    "failed to send error message back to client for action [{}]", transportReplicaAction), responseException);
             }
         }
@@ -854,12 +847,9 @@ public void handleException(TransportException exp) {
                 final Throwable cause = exp.unwrapCause();
                 if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException || (isPrimaryAction && retryPrimaryException(cause))) {
-                    logger.trace(
-                        (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+                    logger.trace(() -> new ParameterizedMessage(
                         "received an error from node [{}] for request [{}], scheduling a retry",
-                        node.getId(),
-                        requestToPerform),
-                        exp);
+                        node.getId(), requestToPerform), exp);
                     retry(exp);
                 } else {
                     finishAsFailed(exp);
@@ -903,9 +893,7 @@ public void onTimeout(TimeValue timeout) {
         void finishAsFailed(Exception failure) {
             if (finished.compareAndSet(false, true)) {
                 setPhase(task, "failed");
-                logger.trace(
-                    (org.apache.logging.log4j.util.Supplier<?>)
-                    () -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
action [{}], request [{}]", actionName, request), failure); listener.onFailure(failure); } else { assert false : "finishAsFailed called but operation is already finished"; @@ -913,13 +901,9 @@ void finishAsFailed(Exception failure) { } void finishWithUnexpectedFailure(Exception failure) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "unexpected error during the primary phase for action [{}], request [{}]", - actionName, - request), - failure); + actionName, request), failure); if (finished.compareAndSet(false, true)) { setPhase(task, "failed"); listener.onFailure(failure); diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 528eacac3dafb..e9e0a0b1922e7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -204,10 +204,8 @@ public void handleException(TransportException exp) { } private void onFailure(ShardRouting shardRouting, Exception e) { - if (logger.isTraceEnabled() && e != null) { - logger.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e); + if (e != null) { + logger.trace(() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e); } perform(e); } @@ -224,11 +222,7 @@ private void perform(@Nullable final Exception currentFailure) { if (failure == null || isShardNotAvailableException(failure)) { failure = new NoShardAvailableActionException(null, LoggerMessageFormat.format("No shard available for [{}]", internalRequest.request()), failure); } else { - if (logger.isDebugEnabled()) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure); - } + logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure); } listener.onFailure(failure); return; diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index 35b2b41dfda6e..aad7d20073c3b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -315,9 +315,7 @@ private void onOperation(int idx, NodeTasksResponse nodeResponse) { private void onFailure(int idx, String nodeId, Throwable t) { if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); + logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); } responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index 8c1d06113d684..b83ac3881fda5 100644 --- 
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.termvectors;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.TransportActions;
@@ -89,7 +88,7 @@ protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequ
                 if (TransportActions.isShardNotAvailableException(t)) {
                     throw (ElasticsearchException) t;
                 } else {
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
+                    logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
                     response.add(request.locations.get(i),
                         new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
                 }
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java
index 5335b4be8b4e2..19fdb8837d69b 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java
@@ -21,7 +21,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.Constants;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
@@ -428,15 +427,11 @@ long getMaxMapCount(Logger logger) {
                 try {
                     return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
                 } catch (final NumberFormatException e) {
-                    logger.warn(
-                        (Supplier<?>) () -> new ParameterizedMessage(
-                            "unable to parse vm.max_map_count [{}]",
-                            rawProcSysVmMaxMapCount),
-                        e);
+                    logger.warn(() -> new ParameterizedMessage("unable to parse vm.max_map_count [{}]", rawProcSysVmMaxMapCount), e);
                 }
             }
         } catch (final IOException e) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
+            logger.warn(() -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
         }
         return -1;
     }
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java
index 6869a6abb710f..857ff65b6c2b8 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java
@@ -71,15 +71,12 @@ static boolean isFatalUncaught(Throwable e) {
 
     void onFatalUncaught(final String threadName, final Throwable t) {
         final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
-        logger.error(
-            (org.apache.logging.log4j.util.Supplier<?>)
-            () -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
+        logger.error(() -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
     }
 
     void onNonFatalUncaught(final String threadName, final Throwable t) {
         final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
-        logger.warn((org.apache.logging.log4j.util.Supplier<?>)
-            () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
+        logger.warn(() -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
     }
 
     void halt(int status) {
diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
index 5d31e74bef621..109efb400bc93 100644
--- a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
+++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
@@ -373,7 +373,7 @@ protected List<DiscoveryNode> validateNewNodes(Set<DiscoveryNode> nodes) {
                 transportService.connectToNode(node);
             } catch (Exception e) {
                 it.remove();
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
+                logger.debug(() -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
             }
         }
     }
@@ -428,13 +428,10 @@ public LivenessResponse newInstance() {
                                 nodeWithInfo.getAttributes(), nodeWithInfo.getRoles(), nodeWithInfo.getVersion()));
                     }
                 } catch (ConnectTransportException e) {
-                    logger.debug(
-                        (Supplier<?>)
-                        () -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
+                    logger.debug(() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
                     hostFailureListener.onNodeDisconnected(listedNode, e);
                 } catch (Exception e) {
-                    logger.info(
-                        (Supplier<?>) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
+                    logger.info(() -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
                 }
             }
@@ -481,12 +478,10 @@ void onDone() {
                     public void onFailure(Exception e) {
                         onDone();
                         if (e instanceof ConnectTransportException) {
-                            logger.debug((Supplier<?>)
-                                () -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", nodeToPing), e);
+                            logger.debug(() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", nodeToPing), e);
                             hostFailureListener.onNodeDisconnected(nodeToPing, e);
                         } else {
-                            logger.info(
-                                (Supplier<?>) () -> new ParameterizedMessage(
+                            logger.info(() -> new ParameterizedMessage(
                                     "failed to get local cluster state info for {}, disconnecting...", nodeToPing), e);
                         }
                     }
@@ -530,8 +525,7 @@ public void handleResponse(ClusterStateResponse response) {
 
                         @Override
                         public void handleException(TransportException e) {
-                            logger.info(
-                                (Supplier<?>) () -> new ParameterizedMessage(
+                            logger.info(() -> new ParameterizedMessage(
                                     "failed to get local cluster state for {}, disconnecting...", nodeToPing), e);
                             try {
                                 hostFailureListener.onNodeDisconnected(nodeToPing, e);
diff --git a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
index aab75eb2aad7b..998cd5ba0a870 100644
--- a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.cluster;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
@@ -98,7 +97,7 @@ public void onFailure(Exception e) {
                 // will try again after `cluster.nodes.reconnect_interval` on all nodes but the current master.
                 // On the master, node fault detection will remove these nodes from the cluster as their are not
                 // connected. Note that it is very rare that we end up here on the master.
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to connect to {}", node), e);
+                logger.warn(() -> new ParameterizedMessage("failed to connect to {}", node), e);
             }
 
             @Override
@@ -137,7 +136,7 @@ public void disconnectFromNodesExcept(DiscoveryNodes nodesToKeep) {
                 try {
                     transportService.disconnectFromNode(node);
                 } catch (Exception e) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
+                    logger.warn(() -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
                 }
             }
         }
@@ -160,9 +159,7 @@ void validateAndConnectIfNeeded(DiscoveryNode node) {
                     // log every 6th failure
                     if ((nodeFailureCount % 6) == 1) {
                         final int finalNodeFailureCount = nodeFailureCount;
-                        logger.warn(
-                            (Supplier<?>)
-                            () -> new ParameterizedMessage(
+                        logger.warn(() -> new ParameterizedMessage(
                                 "failed to connect to node {} (tried [{}] times)", node, finalNodeFailureCount), e);
                     }
                     nodes.put(node, nodeFailureCount);
diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index f29841e3744a9..915e900b9ddf1 100644
--- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -21,7 +21,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
@@ -205,7 +204,7 @@ private static class ShardFailedTransportHandler implements TransportRequestHand
         @Override
         public void messageReceived(FailedShardEntry request, TransportChannel channel) throws Exception {
-            logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
+            logger.debug(() -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
             clusterService.submitStateUpdateTask(
                 "shard-failed",
                 request,
@@ -214,12 +213,12 @@ public void messageReceived(FailedShardEntry request, TransportChannel channel)
                 new ClusterStateTaskListener() {
                     @Override
                     public void onFailure(String source, Exception e) {
-                        logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
+                        logger.error(() -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
                         try {
                             channel.sendResponse(e);
                         } catch (Exception channelException) {
                             channelException.addSuppressed(e);
-                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
+                            logger.warn(() -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
                         }
                     }
@@ -229,7 +228,7 @@ public void onNoLongerMaster(String source) {
                         try {
                             channel.sendResponse(new NotMasterException(source));
                         } catch (Exception channelException) {
-                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
+                            logger.warn(() -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
                         }
                     }
@@ -238,7 +237,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
                         try {
                             channel.sendResponse(TransportResponse.Empty.INSTANCE);
                         } catch (Exception channelException) {
-                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
+                            logger.warn(() -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
                         }
                     }
                 }
@@ -323,7 +322,7 @@ public ClusterTasksResult execute(ClusterState currentState, L
                 maybeUpdatedState = applyFailedShards(currentState, failedShardsToBeApplied, staleShardsToBeApplied);
                 batchResultBuilder.successes(tasksToBeApplied);
             } catch (Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply failed shards {}", failedShardsToBeApplied), e);
+                logger.warn(() -> new ParameterizedMessage("failed to apply failed shards {}", failedShardsToBeApplied), e);
                 // failures are communicated back to the requester
                 // cluster state will not be updated in this case
                 batchResultBuilder.failures(tasksToBeApplied, e);
@@ -501,7 +500,7 @@ public ClusterTasksResult execute(ClusterState currentState,
                 maybeUpdatedState = allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied);
                 builder.successes(tasksToBeApplied);
             } catch (Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
+                logger.warn(() -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
                 builder.failures(tasksToBeApplied, e);
             }
@@ -510,7 +509,7 @@ public ClusterTasksResult execute(ClusterState currentState,
         @Override
         public void onFailure(String source, Exception e) {
-            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+            logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
index 344c424a62484..41120115c792e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
@@ -23,7 +23,6 @@
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ResourceAlreadyExistsException;
@@ -558,9 +557,9 @@ public ClusterState execute(ClusterState currentState) throws Exception {
             @Override
             public void onFailure(String source, Exception e) {
                 if (e instanceof ResourceAlreadyExistsException) {
-                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
create", request.index()), e); + logger.trace(() -> new ParameterizedMessage("[{}] failed to create", request.index()), e); } else { - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e); + logger.debug(() -> new ParameterizedMessage("[{}] failed to create", request.index()), e); } super.onFailure(source, e); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index a9301056f5ae0..4b6a898a3a9f1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.Version; import org.elasticsearch.common.component.AbstractComponent; @@ -208,7 +207,7 @@ IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) { final Settings upgrade = indexScopedSettings.archiveUnknownOrInvalidSettings( settings, e -> logger.warn("{} ignoring unknown index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), - (e, ex) -> logger.warn((Supplier) () -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex)); + (e, ex) -> logger.warn(() -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex)); if (upgrade != settings) { return IndexMetaData.builder(indexMetaData).settings(upgrade).build(); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 6c6c6ca33e461..829504c154e41 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -191,7 +191,7 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Bui } } } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e); } return dirty; } @@ -205,7 +205,7 @@ public void refreshMapping(final String index, final String indexUUID) { refreshTask, ClusterStateTaskConfig.build(Priority.HIGH), refreshExecutor, - (source, e) -> logger.warn((Supplier) () -> new ParameterizedMessage("failure during [{}]", source), e) + (source, e) -> logger.warn(() -> new ParameterizedMessage("failure during [{}]", source), e) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 1c3d629a72fea..0bcefa9fc7248 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing; import org.apache.logging.log4j.message.ParameterizedMessage; 
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
@@ -109,16 +108,16 @@ public void onFailure(String source, Exception e) {
                     rerouting.set(false);
                     ClusterState state = clusterService.state();
                     if (logger.isTraceEnabled()) {
-                        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e);
+                        logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e);
                     } else {
-                        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
+                        logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
                     }
                 }
             });
         } catch (Exception e) {
             rerouting.set(false);
             ClusterState state = clusterService.state();
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e);
+            logger.warn(() -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e);
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
index ae79b779045f4..01fa5837387c8 100644
--- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
@@ -316,7 +316,7 @@ public void runOnApplierThread(final String source, Consumer clust
     }
 
     @Override
-    public void onNewClusterState(final String source, final java.util.function.Supplier<ClusterState> clusterStateSupplier,
+    public void onNewClusterState(final String source, final Supplier<ClusterState> clusterStateSupplier,
                                   final ClusterStateTaskListener listener) {
         Function<ClusterState, ClusterState> applyFunction = currentState -> {
             ClusterState nextState = clusterStateSupplier.get();
@@ -401,7 +401,7 @@ protected void runTask(UpdateTask task) {
         } catch (Exception e) {
             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
             if (logger.isTraceEnabled()) {
-                logger.trace(new ParameterizedMessage(
+                logger.trace(() -> new ParameterizedMessage(
                     "failed to execute cluster state applier in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
                     executionTime,
                     previousClusterState.version(),
@@ -439,8 +439,7 @@ protected void runTask(UpdateTask task) {
             final long version = newClusterState.version();
             final String stateUUID = newClusterState.stateUUID();
             final String fullState = newClusterState.toString();
-            logger.warn(
-                (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+            logger.warn(() -> new ParameterizedMessage(
                     "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
                     executionTime,
                     version,
diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java
index 6858866d2dc88..20a6602b5c5ad 100644
--- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java
@@ -21,7 +21,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.Assertions;
 import org.elasticsearch.cluster.AckedClusterStateTaskListener;
 import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -226,10 +225,8 @@ protected void runTasks(TaskInputs taskInputs) {
                 clusterStatePublisher.accept(clusterChangedEvent, taskOutputs.createAckListener(threadPool, newClusterState));
             } catch (Discovery.FailedToCommitClusterStateException t) {
                 final long version = newClusterState.version();
-                logger.warn(
-                    (Supplier<?>) () -> new ParameterizedMessage(
-                        "failing [{}]: failed to commit cluster state version [{}]", summary, version),
-                    t);
+                logger.warn(() -> new ParameterizedMessage(
+                    "failing [{}]: failed to commit cluster state version [{}]", summary, version), t);
                 taskOutputs.publishingFailed(t);
                 return;
             }
@@ -239,11 +236,9 @@ protected void runTasks(TaskInputs taskInputs) {
             try {
                 taskOutputs.clusterStatePublished(clusterChangedEvent);
             } catch (Exception e) {
-                logger.error(
-                    (Supplier<?>) () -> new ParameterizedMessage(
+                logger.error(() -> new ParameterizedMessage(
                         "exception thrown while notifying executor of new cluster state publication [{}]",
-                        summary),
-                    e);
+                        summary), e);
             }
             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
             logger.debug("processing [{}]: took [{}] done publishing updated cluster state (version: {}, uuid: {})", summary,
@@ -255,8 +250,7 @@ protected void runTasks(TaskInputs taskInputs) {
             final long version = newClusterState.version();
             final String stateUUID = newClusterState.stateUUID();
             final String fullState = newClusterState.toString();
-            logger.warn(
-                (Supplier<?>) () -> new ParameterizedMessage(
+            logger.warn(() -> new ParameterizedMessage(
                     "failed to publish updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
                     executionTime,
                     version,
@@ -473,8 +467,7 @@ public void onFailure(String source, Exception e) {
                     listener.onFailure(source, e);
                 } catch (Exception inner) {
                     inner.addSuppressed(e);
-                    logger.error(
-                        (Supplier<?>) () -> new ParameterizedMessage(
+                    logger.error(() -> new ParameterizedMessage(
                             "exception thrown by listener notifying of failure from [{}]", source), inner);
                 }
             }
@@ -484,8 +477,7 @@ public void onNoLongerMaster(String source) {
                 try {
                     listener.onNoLongerMaster(source);
                 } catch (Exception e) {
-                    logger.error(
-                        (Supplier<?>) () -> new ParameterizedMessage(
+                    logger.error(() -> new ParameterizedMessage(
                             "exception thrown by listener while notifying no longer master from [{}]", source), e);
                 }
             }
@@ -495,12 +487,9 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
                 try {
                     listener.clusterStateProcessed(source, oldState, newState);
                 } catch (Exception e) {
-                    logger.error(
-                        (Supplier<?>) () -> new ParameterizedMessage(
+                    logger.error(() -> new ParameterizedMessage(
                             "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
-                                "{}\nnew cluster state:\n{}",
-                            source, oldState, newState),
-                        e);
+                                "{}\nnew cluster state:\n{}", source, oldState, newState), e);
                 }
             }
@@ -614,10 +603,8 @@ public void onNodeAck(DiscoveryNode node, @Nullable Exception e) {
                 logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
             } else {
                 this.lastFailure = e;
-                logger.debug(
-                    (Supplier<?>) () -> new ParameterizedMessage(
-                        "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
-                    e);
+                logger.debug(() -> new ParameterizedMessage(
+                    "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion), e);
             if (countDown.countDown()) {
@@ -650,7 +637,7 @@ protected ClusterTasksResult executeTasks(TaskInputs taskInputs, long st
             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
             if (logger.isTraceEnabled()) {
                 logger.trace(
-                    (Supplier<?>) () -> new ParameterizedMessage(
+                    () -> new ParameterizedMessage(
                         "failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
                         executionTime,
                         previousClusterState.version(),
diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java
index ba1450d1fb83c..5bef7bee4f10b 100644
--- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java
+++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java
@@ -21,7 +21,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.codecs.CodecUtil;
@@ -111,7 +110,7 @@ public static Version parseVersion(@Nullable String version, Version defaultVers
         try {
             return Version.parse(version);
         } catch (ParseException e) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e);
+            logger.warn(() -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e);
             return defaultVersion;
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
index c3c6de5355af4..e8bb946c8a795 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.common.settings;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.search.spell.LevensteinDistance;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ExceptionsHelper;
@@ -135,7 +134,7 @@ public synchronized Settings validateUpdate(Settings settings) {
                 settingUpdater.getValue(current, previous);
             } catch (RuntimeException ex) {
                 exceptions.add(ex);
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
+                logger.debug(() -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
             }
         }
         // here we are exhaustive and record all settings that failed.
@@ -163,8 +162,7 @@ public synchronized Settings applySettings(Settings newSettings) {
             try {
                 applyRunnables.add(settingUpdater.updater(current, previous));
             } catch (Exception ex) {
-                logger.warn(
-                    (Supplier<?>) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
+                logger.warn(() -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
                 throw ex;
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java
index 3ee7d1f23add2..b709c48d8c26c 100644
--- a/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java
+++ b/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java
@@ -21,7 +21,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.logging.Loggers;
@@ -67,7 +66,7 @@ void upgrade(final Index index, final Path source, final Path target) throws IOE
         } catch (NoSuchFileException | FileNotFoundException exception) {
             // thrown when the source is non-existent because the folder was renamed
             // by another node (shared FS) after we checked if the target exists
-            logger.error((Supplier<?>) () -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " +
+            logger.error(() -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " +
                 "upgrading with single node", target), exception);
             throw exception;
         } finally {
diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java
index 825d18b7e63cb..2dc3f6677f332 100644
--- a/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java
+++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java
@@ -21,7 +21,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 
 public class LoggingRunnable implements Runnable {
@@ -38,7 +37,7 @@ public void run() {
         try {
             runnable.run();
         } catch (Exception e) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e);
+            logger.warn(() -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java b/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java
index b432d0538c985..fd47fd0e86d51 100644
--- a/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java
+++ b/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java
@@ -20,7 +20,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.logging.ESLoggerFactory;
@@ -70,7 +69,7 @@ private void onNodeAck(final Discovery.AckListener ackListener, DiscoveryNode no
             ackListener.onNodeAck(node, e);
         } catch (Exception inner) {
             inner.addSuppressed(e);
-            logger.debug((Supplier<?>) () -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner);
+            logger.debug(() -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner);
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java
index 2a32caabc77a4..94ea33d1a16ab 100644
--- a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java
+++ b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java
@@ -76,11 +76,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
             public void onFailure(String source, Exception e) {
                 latch.countDown();
                 ackListener.onNodeAck(transportService.getLocalNode(), e);
-                logger.warn(
-                    (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
-                        "failed while applying cluster state locally [{}]",
-                        event.source()),
-                    e);
+                logger.warn(() -> new ParameterizedMessage("failed while applying cluster state locally [{}]", event.source()), e);
             }
         };
         clusterApplier.onNewClusterState("apply-locally-on-node[" + event.source() + "]", () -> clusterState, listener);
diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java
index fff5e7cb5c983..c38cfe88619ee 100644
--- a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java
+++ b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.discovery.zen;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -270,13 +269,9 @@ public void handleException(TransportException exp) {
                     }
 
                     int retryCount = ++MasterFaultDetection.this.retryCount;
-                    logger.trace(
-                        (Supplier<?>) () -> new ParameterizedMessage(
+                    logger.trace(() -> new ParameterizedMessage(
                             "[master] failed to ping [{}], retry [{}] out of [{}]",
-                            masterNode,
-                            retryCount,
-                            pingRetryCount),
-                        exp);
+                            masterNode, retryCount, pingRetryCount), exp);
                     if (retryCount >= pingRetryCount) {
                         logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout",
                             masterNode, pingRetryCount, pingRetryTimeout);
diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java
index 7d10466b638a8..e36497d09164f 100644
--- a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java
+++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java
@@ -20,7 +20,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -364,7 +363,7 @@ public void onFailure(String source, Exception e) {
                 try {
                     callback.onFailure(e);
                 } catch (Exception inner) {
-                    logger.error((Supplier<?>) () -> new ParameterizedMessage("error handling task failure [{}]", e), inner);
[{}]", e), inner); } } } @@ -375,7 +374,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS try { callback.onSuccess(); } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected error during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected error during [{}]", source), e); } } } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java index 5cd02a52504f5..218e6e3f63f95 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -177,12 +176,8 @@ public void run() { } }); } catch (EsRejectedExecutionException ex) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down", - node, - reason), - ex); + logger.trace(() -> new ParameterizedMessage( + "[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down", node, reason), ex); } } @@ -247,13 +242,8 @@ public void handleException(TransportException exp) { } retryCount++; - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[node ] failed to ping [{}], retry [{}] out of [{}]", - node, - retryCount, - pingRetryCount), - exp); + logger.trace( () -> new ParameterizedMessage( + "[node ] failed to ping [{}], retry [{}] out of [{}]", node, retryCount, pingRetryCount), exp); if (retryCount >= pingRetryCount) { logger.debug("[node ] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", node, pingRetryCount, pingRetryTimeout); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index 13bcf1f15f56a..382a42141d83a 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -247,9 +247,7 @@ private void sendFullClusterState(ClusterState clusterState, Map) () -> - new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e); + logger.warn(() -> new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e); sendingController.onNodeSendFailed(node, e); return; } @@ -297,16 +295,13 @@ public void handleException(TransportException exp) { logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage()); sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController); } else { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("failed to send cluster state to {}", node), exp); + logger.debug(() -> new ParameterizedMessage("failed to send cluster state to {}", node), exp); sendingController.onNodeSendFailed(node, exp); } } }); } catch (Exception e) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("error sending 
cluster state to {}", node), e); + logger.warn(() -> new ParameterizedMessage("error sending cluster state to {}", node), e); sendingController.onNodeSendFailed(node, e); } } @@ -333,15 +328,13 @@ public void handleResponse(TransportResponse.Empty response) { @Override public void handleException(TransportException exp) { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", + logger.debug(() -> new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), exp); sendingController.getPublishResponseHandler().onFailure(node, exp); } }); } catch (Exception t) { - logger.warn((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", + logger.warn(() -> new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), t); sendingController.getPublishResponseHandler().onFailure(node, t); } @@ -616,7 +609,7 @@ private synchronized boolean markAsFailed(String details, Exception reason) { if (committedOrFailed()) { return committed == false; } - logger.trace((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to commit version [{}]. {}", + logger.trace(() -> new ParameterizedMessage("failed to commit version [{}]. {}", clusterState.version(), details), reason); committed = false; committedOrFailedLatch.countDown(); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 312c954cf6484..64d51c2b5c4b3 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; @@ -513,18 +512,13 @@ protected void doRun() throws Exception { public void onFailure(Exception e) { if (e instanceof ConnectTransportException || e instanceof AlreadyClosedException) { // can't connect to the node - this is more common path! 
-                    logger.trace(
-                        (Supplier<?>) () -> new ParameterizedMessage(
-                            "[{}] failed to ping {}", pingingRound.id(), node), e);
+                    logger.trace(() -> new ParameterizedMessage("[{}] failed to ping {}", pingingRound.id(), node), e);
                 } else if (e instanceof RemoteTransportException) {
                     // something went wrong on the other side
-                    logger.debug(
-                        (Supplier<?>) () -> new ParameterizedMessage(
+                    logger.debug(() -> new ParameterizedMessage(
                             "[{}] received a remote error as a response to ping {}", pingingRound.id(), node), e);
                 } else {
-                    logger.warn(
-                        (Supplier<?>) () -> new ParameterizedMessage(
-                            "[{}] failed send ping to {}", pingingRound.id(), node), e);
+                    logger.warn(() -> new ParameterizedMessage("[{}] failed send ping to {}", pingingRound.id(), node), e);
                 }
             }
@@ -574,9 +568,9 @@ public void handleException(TransportException exp) {
                 if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException ||
                     exp.getCause() instanceof AlreadyClosedException) {
                     // ok, not connected...
-                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to connect to {}", node), exp);
+                    logger.trace(() -> new ParameterizedMessage("failed to connect to {}", node), exp);
                 } else if (closed == false) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send ping to [{}]", node), exp);
+                    logger.warn(() -> new ParameterizedMessage("failed to send ping to [{}]", node), exp);
                 }
             }
         };
diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
index 4946e9179d58d..79ba587974398 100644
--- a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
+++ b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
@@ -291,7 +291,7 @@ protected void doStop() {
             try {
                 membership.sendLeaveRequestBlocking(nodes.getMasterNode(), nodes.getLocalNode(), TimeValue.timeValueSeconds(1));
             } catch (Exception e) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e);
+                logger.debug(() -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e);
             }
         } else {
             // we're master -> let other potential master we left and start a master election now rather then wait for masterFD
@@ -303,7 +303,7 @@ protected void doStop() {
                 try {
                     membership.sendLeaveRequest(nodes.getLocalNode(), possibleMaster);
                 } catch (Exception e) {
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e);
+                    logger.debug(() -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e);
                 }
             }
         }
@@ -367,11 +367,8 @@ public void onNewClusterStateFailed(Exception e) {
                     processedOrFailed.set(true);
                     latch.countDown();
                     ackListener.onNodeAck(localNode, e);
-                    logger.warn(
-                        (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
-                            "failed while applying cluster state locally [{}]",
-                            clusterChangedEvent.source()),
-                        e);
+                    logger.warn(() -> new ParameterizedMessage(
+                        "failed while applying cluster state locally [{}]", clusterChangedEvent.source()), e);
                 }
             });
@@ -393,11 +390,8 @@ public void onNewClusterStateFailed(Exception e) {
         try {
             latch.await();
         } catch (InterruptedException e) {
-            logger.debug(
-                (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
-                    "interrupted while applying cluster state locally [{}]",
cluster state locally [{}]", - clusterChangedEvent.source()), - e); + logger.debug(() -> new ParameterizedMessage( + "interrupted while applying cluster state locally [{}]", clusterChangedEvent.source()), e); Thread.currentThread().interrupt(); } } @@ -514,7 +508,7 @@ private boolean joinElectedMaster(DiscoveryNode masterNode) { // first, make sure we can connect to the master transportService.connectToNode(masterNode); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e); + logger.warn(() -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e); return false; } int joinAttempt = 0; // we retry on illegal state if the master is not yet ready @@ -534,7 +528,7 @@ private boolean joinElectedMaster(DiscoveryNode masterNode) { } } else { if (logger.isTraceEnabled()) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e); + logger.trace(() -> new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e); } else { logger.info("failed to send join request to master [{}], reason [{}]", masterNode, ExceptionsHelper.detailedMessage(e)); } @@ -646,7 +640,7 @@ ClusterState remainingNodesClusterState(final ClusterState currentState, Discove @Override public void onFailure(final String source, final Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } @Override @@ -718,7 +712,7 @@ private void handleMasterGone(final DiscoveryNode masterNode, final Throwable ca return; } - logger.info((Supplier) () -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause); + logger.info(() -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause); synchronized (stateMutex) { if (localNodeMaster() == false && masterNode.equals(committedState.get().nodes().getMasterNode())) { @@ -764,7 +758,7 @@ boolean processNextCommittedClusterState(String reason) { pendingStatesQueue.markAsFailed(newClusterState, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.error((Supplier) () -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner); + logger.error(() -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner); } return false; } @@ -807,14 +801,14 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure applying [{}]", reason), e); + logger.error(() -> new ParameterizedMessage("unexpected failure applying [{}]", reason), e); try { // TODO: use cluster state uuid instead of full cluster state so that we don't keep reference to CS around // for too long. 
                     pendingStatesQueue.markAsFailed(newClusterState, e);
                 } catch (Exception inner) {
                     inner.addSuppressed(e);
-                    logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner);
+                    logger.error(() -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner);
                 }
             }
         });
@@ -880,7 +874,7 @@ void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final
             try {
                 membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
             } catch (Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node),
+                logger.warn(() -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node),
                     e);
                 callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
                 return;
@@ -1029,11 +1023,11 @@ private void handleAnotherMaster(ClusterState localClusterState, final Discovery
 
                     @Override
                     public void handleException(TransportException exp) {
-                        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp);
+                        logger.warn(() -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp);
                     }
                 });
             } catch (Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e);
+                logger.warn(() -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e);
             }
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
index 15fa0a0f87cc2..87874bd45000c 100644
--- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
+++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -21,7 +21,6 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.store.Directory;
@@ -218,8 +217,8 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce
                     }
                 } catch (IOException e) {
-                    startupTraceLogger.trace(
-                        (Supplier<?>) () -> new ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), e);
+                    startupTraceLogger.trace(() -> new ParameterizedMessage(
+                        "failed to obtain node lock on {}", dir.toAbsolutePath()), e);
                     lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
                     // release all the ones that were obtained up until now
                     releaseAndNullLocks(locks);
@@ -898,7 +897,7 @@ public void close() {
                     logger.trace("releasing lock [{}]", lock);
                     lock.close();
                 } catch (IOException e) {
-                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to release lock [{}]", lock), e);
+                    logger.trace(() -> new ParameterizedMessage("failed to release lock [{}]", lock), e);
                 }
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java
index e2bbae775e5d7..0a91ba81443ed 100644
--- a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java
+++ b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java
@@ -21,7 +21,6 @@
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -217,7 +216,7 @@ protected synchronized void processAsyncFetch(List responses, List) () -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]", + logger.warn(() -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]", shardId, type, failure.nodeId()), failure); nodeEntry.doneFetching(failure.getCause()); } diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index f4d191ac28a8a..ae8f5a85def44 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -128,9 +128,7 @@ public void performStateRecovery(final GatewayStateRecoveredListener listener) t } } catch (Exception e) { final Index electedIndex = electedIndexMetaData.getIndex(); - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e); + logger.warn(() -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e); electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build(); } @@ -159,13 +157,8 @@ private void logUnknownSetting(String settingType, Map.Entry e) } private void logInvalidSetting(String settingType, Map.Entry e, IllegalArgumentException ex) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("ignoring invalid {} setting: [{}] with value [{}]; archiving", - settingType, - e.getKey(), - e.getValue()), - ex); + logger.warn(() -> new ParameterizedMessage("ignoring invalid {} setting: [{}] with value [{}]; archiving", + settingType, e.getKey(), e.getValue()), ex); } public interface GatewayStateRecoveredListener { diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 91ce90bd8b58c..d77031218179c 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -283,7 +282,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); GatewayRecoveryListener.this.onFailure("failed to updated cluster state"); } diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 5f75771e9e63f..116d181ccd3a2 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -20,7 +20,6 @@ package 
org.elasticsearch.gateway; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -158,7 +157,7 @@ public ClusterState execute(ClusterState currentState) { minIndexCompatibilityVersion); } catch (Exception ex) { // upgrade failed - adding index as closed - logger.warn((Supplier) () -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex); + logger.warn(() -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex); upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).version(indexMetaData.getVersion() + 1).build(); } metaData.put(upgradedIndexMetaData, false); @@ -183,7 +182,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); try { channel.sendResponse(e); } catch (Exception inner) { diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 0ac421b699faa..b6c8d411474c9 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; @@ -323,8 +322,7 @@ public T loadLatestState(Logger logger, NamedXContentRegistry namedXContentRegi return state; } catch (Exception e) { exceptions.add(new IOException("failed to read " + pathAndStateId.toString(), e)); - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "{}: failed to read [{}], ignoring...", pathAndStateId.file.toAbsolutePath(), prefix), e); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 7fab7acc5f22d..00b981175f228 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -20,7 +20,6 @@ package org.elasticsearch.gateway; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; @@ -125,7 +124,7 @@ public void writeIndex(String reason, IndexMetaData indexMetaData) throws IOExce IndexMetaData.FORMAT.write(indexMetaData, nodeEnv.indexPaths(indexMetaData.getIndex())); } catch (Exception ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}]: failed to write index state", index), ex); + 
logger.warn(() -> new ParameterizedMessage("[{}]: failed to write index state", index), ex); throw new IOException("failed to write state for [" + index + "]", ex); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index c66c00728a715..f9344186c5753 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource; @@ -259,9 +258,9 @@ protected static NodeShardsResult buildNodeShardsResult(ShardRouting shard, bool } else { final String finalAllocationId = allocationId; if (nodeShardState.storeException() instanceof ShardLockObtainFailedException) { - logger.trace((Supplier) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); + logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); } else { - logger.trace((Supplier) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); + logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); allocationId = null; } } diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 11df875d4dd99..e854584b150d8 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -20,7 +20,6 @@ package org.elasticsearch.gateway; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -146,8 +145,7 @@ protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger); } catch (Exception exception) { final ShardPath finalShardPath = shardPath; - logger.trace( - (Supplier) () -> new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "{} can't open index for shard [{}] in path [{}]", shardId, shardStateMetaData, diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 90d8a205e8b57..1bdec683bfbd0 100644 --- 
a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; @@ -61,7 +60,7 @@ public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting ol try { listener.shardRoutingChanged(indexShard, oldRouting, newRouting); } catch (Exception e) { - logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke shard routing changed callback", indexShard.shardId().getId()), e); } } } @@ -72,7 +71,7 @@ public void afterIndexShardCreated(IndexShard indexShard) { try { listener.afterIndexShardCreated(indexShard); } catch (Exception e) { - logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e); throw e; } } @@ -84,7 +83,7 @@ public void afterIndexShardStarted(IndexShard indexShard) { try { listener.afterIndexShardStarted(indexShard); } catch (Exception e) { - logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e); throw e; } } @@ -97,7 +96,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh try { listener.beforeIndexShardClosed(shardId, indexShard, indexSettings); } catch (Exception e) { - logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e); throw e; } } @@ -110,7 +109,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha try { listener.afterIndexShardClosed(shardId, indexShard, indexSettings); } catch (Exception e) { - logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e); throw e; } } @@ -122,7 +121,7 @@ public void onShardInactive(IndexShard indexShard) { try { listener.onShardInactive(indexShard); } catch (Exception e) { - logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e); throw e; } } @@ -134,7 +133,7 @@ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardSt try { listener.indexShardStateChanged(indexShard, previousState, indexShard.state(), reason); } catch (Exception e) { - logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", 
indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e); throw e; } } @@ -170,7 +169,7 @@ public void beforeIndexShardCreated(ShardId shardId, Settings indexSettings) { try { listener.beforeIndexShardCreated(shardId, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e); throw e; } } @@ -207,7 +206,7 @@ public void beforeIndexShardDeleted(ShardId shardId, try { listener.beforeIndexShardDeleted(shardId, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e); throw e; } } @@ -220,7 +219,7 @@ public void afterIndexShardDeleted(ShardId shardId, try { listener.afterIndexShardDeleted(shardId, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e); throw e; } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 39eac18b29441..4fe50b983dfac 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -431,8 +431,7 @@ private void closeShard(String reason, ShardId sId, IndexShard indexShard, Store final boolean flushEngine = deleted.get() == false && closed.get(); indexShard.close(reason, flushEngine); } catch (Exception e) { - logger.debug((org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); + logger.debug(() -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); // ignore } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java index 0c901cf65010b..f8b9d9d2ef805 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -20,7 +20,6 @@ package org.elasticsearch.index; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; @@ -154,9 +153,7 @@ public TerminationHandle warmReader(final IndexShard indexShard, final Engine.Se indexShard .warmerService() .logger() - .warn( - (Supplier) () -> new ParameterizedMessage( - "failed to warm-up global ordinals for [{}]", fieldType.name()), e); + .warn(() -> new ParameterizedMessage("failed to warm-up global ordinals for [{}]", fieldType.name()), e); } finally { latch.countDown(); } diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 6af9c5eeb6e51..a59af29036b7d 100644 --- 
a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.cache.bitset; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; @@ -263,7 +262,7 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start)); } } catch (Exception e) { - indexShard.warmerService().logger().warn((Supplier) () -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e); + indexShard.warmerService().logger().warn(() -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e); } finally { latch.countDown(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 1452c5de49278..1ca4468539da1 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; @@ -597,7 +596,7 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment try { directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ); } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e); + logger.warn(() -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e); return ImmutableOpenMap.of(); } @@ -613,15 +612,14 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment files = directory.listAll(); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn( - (Supplier) () -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e); + logger.warn(() -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e); return ImmutableOpenMap.of(); } } else { try { files = segmentReader.getSegmentInfo().files().toArray(new String[]{}); } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e); + logger.warn(() -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e); return ImmutableOpenMap.of(); } } @@ -634,13 +632,10 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment length = directory.fileLength(file); } catch (NoSuchFileException | FileNotFoundException e) { final Directory finalDirectory = directory; - 
logger.warn((Supplier) - () -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e); + logger.warn(() -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn( - (Supplier) - () -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e); + logger.warn(() -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e); } if (length == 0L) { continue; @@ -653,9 +648,7 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment directory.close(); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn( - (Supplier) - () -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e); + logger.warn(() -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e); } } @@ -706,7 +699,7 @@ protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boole try { segment.sizeInBytes = info.sizeInBytes(); } catch (IOException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); + logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } segments.put(info.info.name, segment); } else { @@ -732,7 +725,7 @@ private void fillSegmentInfo(SegmentReader segmentReader, boolean verbose, boole try { segment.sizeInBytes = info.sizeInBytes(); } catch (IOException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); + logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } segment.memoryInBytes = segmentReader.ramBytesUsed(); segment.segmentSort = info.info.getIndexSort(); @@ -880,7 +873,7 @@ public void failEngine(String reason, @Nullable Exception failure) { store.incRef(); try { if (failedEngine.get() != null) { - logger.warn((Supplier) () -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure); + logger.warn(() -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. 
[{}]", reason), failure); return; } // this must happen before we close IW or Translog such that we can check this state to opt out of failing the engine @@ -890,7 +883,7 @@ public void failEngine(String reason, @Nullable Exception failure) { // we just go and close this engine - no way to recover closeNoLock("engine failed on: [" + reason + "]", closedLatch); } finally { - logger.warn((Supplier) () -> new ParameterizedMessage("failed engine [{}]", reason), failure); + logger.warn(() -> new ParameterizedMessage("failed engine [{}]", reason), failure); // we must set a failure exception, generate one if not supplied // we first mark the store as corrupted before we notify any listeners // this must happen first otherwise we might try to reallocate so quickly @@ -913,7 +906,7 @@ public void failEngine(String reason, @Nullable Exception failure) { store.decRef(); } } else { - logger.debug((Supplier) () -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure); + logger.debug(() -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 6c6752de7d2d4..cc5c4799479da 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexReader; @@ -1808,7 +1807,7 @@ public Searcher acquireSearcher(String source, SearcherScope scope) { throw ex; } catch (Exception ex) { ensureOpen(ex); // throw EngineCloseException here if we are already closed - logger.error((Supplier) () -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); + logger.error(() -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex); } finally { Releasables.close(releasable); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 4c690a42a8572..4f3b045bfc295 100755 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -220,7 +220,7 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { // only update entries if needed updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true); } catch (Exception e) { - logger.warn((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); throw e; } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java index 52c2b2ae2cf8a..67e0f5400b389 100644 --- 
a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BackoffPolicy; @@ -106,7 +105,7 @@ public void onResponse(ClearScrollResponse response) { @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e); + logger.warn(() -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e); onCompletion.run(); } }); @@ -155,12 +154,11 @@ public void onFailure(Exception e) { if (retries.hasNext()) { retryCount += 1; TimeValue delay = retries.next(); - logger.trace((Supplier) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e); + logger.trace(() -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e); countSearchRetry.run(); threadPool.schedule(delay, ThreadPool.Names.SAME, retryWithContext); } else { - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "giving up on search because we retried [{}] times without success", retryCount), e); fail.accept(e); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 9da8642fd61e4..30f813e86e234 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -880,8 +880,7 @@ public DocsStats docStats() { try { sizeInBytes += info.sizeInBytes(); } catch (IOException e) { - logger.trace((org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); + logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java b/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java index 335196fe68198..288832f1375c6 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.index.engine.Engine; import java.util.List; @@ -94,7 +93,7 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { try { listener.preIndex(shardId, operation); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("preIndex listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("preIndex listener [{}] failed", listener), e); } } return operation; @@ -107,7 +106,7 @@ public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult re try { listener.postIndex(shardId, index, result); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("postIndex listener [{}] failed", 
listener), e); } } } @@ -120,7 +119,7 @@ public void postIndex(ShardId shardId, Engine.Index index, Exception ex) { listener.postIndex(shardId, index, ex); } catch (Exception inner) { inner.addSuppressed(ex); - logger.warn((Supplier) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner); + logger.warn(() -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner); } } } @@ -132,7 +131,7 @@ public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { try { listener.preDelete(shardId, delete); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e); } } return delete; @@ -145,7 +144,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul try { listener.postDelete(shardId, delete, result); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e); } } } @@ -158,7 +157,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { listener.postDelete(shardId, delete, ex); } catch (Exception inner) { inner.addSuppressed(ex); - logger.warn((Supplier) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner); + logger.warn(() -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java index 153a985ab0892..b148d1efba340 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.transport.TransportRequest; @@ -133,7 +132,7 @@ public void onPreQueryPhase(SearchContext searchContext) { try { listener.onPreQueryPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e); } } } @@ -144,7 +143,7 @@ public void onFailedQueryPhase(SearchContext searchContext) { try { listener.onFailedQueryPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e); } } } @@ -155,7 +154,7 @@ public void onQueryPhase(SearchContext searchContext, long tookInNanos) { try { listener.onQueryPhase(searchContext, tookInNanos); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e); } } } @@ -166,7 +165,7 @@ public void onPreFetchPhase(SearchContext searchContext) { try { 
listener.onPreFetchPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e); } } } @@ -177,7 +176,7 @@ public void onFailedFetchPhase(SearchContext searchContext) { try { listener.onFailedFetchPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e); } } } @@ -188,7 +187,7 @@ public void onFetchPhase(SearchContext searchContext, long tookInNanos) { try { listener.onFetchPhase(searchContext, tookInNanos); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e); } } } @@ -199,7 +198,7 @@ public void onNewContext(SearchContext context) { try { listener.onNewContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e); } } } @@ -210,7 +209,7 @@ public void onFreeContext(SearchContext context) { try { listener.onFreeContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e); } } } @@ -221,7 +220,7 @@ public void onNewScrollContext(SearchContext context) { try { listener.onNewScrollContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e); } } } @@ -232,7 +231,7 @@ public void onFreeScrollContext(SearchContext context) { try { listener.onFreeScrollContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e); } } } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index be9164cec5744..e560a0b040b0b 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.CorruptIndexException; @@ -329,7 +328,7 @@ public int compare(Map.Entry o1, Map.Entry o2) { directory.deleteFile(origFile); } catch (FileNotFoundException | NoSuchFileException e) { } catch (Exception ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex); + logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex); } // now, rename the files... 
and fail it it won't work directory.rename(tempFile, origFile); @@ -462,7 +461,7 @@ public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId } catch (FileNotFoundException | NoSuchFileException ex) { logger.info("Failed to open / find files while reading metadata snapshot"); } catch (ShardLockObtainFailedException ex) { - logger.info((Supplier) () -> new ParameterizedMessage("{}: failed to obtain shard lock", shardId), ex); + logger.info(() -> new ParameterizedMessage("{}: failed to obtain shard lock", shardId), ex); } return MetadataSnapshot.EMPTY; } @@ -476,7 +475,7 @@ public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId sh try { tryOpenIndex(indexLocation, shardId, shardLocker, logger); } catch (Exception ex) { - logger.trace((Supplier) () -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex); + logger.trace(() -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex); return false; } return true; @@ -676,7 +675,7 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) thr // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around? throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex); } - logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex); + logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex); // ignore, we don't really care, will get deleted later on } } @@ -886,7 +885,7 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg // Lucene checks the checksum after it tries to lookup the codec etc. // in that case we might get only IAE or similar exceptions while we are really corrupt... // TODO we should check the checksum in lucene if we hit an exception - logger.warn((Supplier) () -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex); + logger.warn(() -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? 
"no" : "yes"), ex); Lucene.checkSegmentInfoIntegrity(directory); } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) { cex.addSuppressed(ex); @@ -921,7 +920,7 @@ private static void checksumFromLuceneFile(Directory directory, String file, Map } } catch (Exception ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex); + logger.debug(() -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex); throw ex; } builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get())); diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index c2d494fd07a34..0043472b72f7c 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.translog; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.Term; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.core.internal.io.IOUtils; @@ -262,7 +261,7 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws try { Files.delete(tempFile); } catch (IOException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex); + logger.warn(() -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex); } } } diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index 73ba9342175d4..e4eeee27e9ecc 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; @@ -179,7 +178,7 @@ public void doRun() { @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e); + logger.warn(() -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e); } }); } @@ -384,7 +383,7 @@ protected void checkIdle(IndexShard shard, long inactiveTimeNS) { try { shard.checkIdle(inactiveTimeNS); } catch (AlreadyClosedException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e); + logger.trace(() -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e); } } } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index 1712f90c206ec..4a55b86291e63 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -19,7 +19,6 @@ package 
org.elasticsearch.indices.analysis; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.store.Directory; import org.apache.lucene.store.SimpleFSDirectory; @@ -140,8 +139,7 @@ private void scanAndLoadDictionaries() throws IOException { } catch (Exception e) { // The cache loader throws unchecked exception (see #loadDictionary()), // here we simply report the exception and continue loading the dictionaries - logger.error( - (Supplier) () -> new ParameterizedMessage( + logger.error(() -> new ParameterizedMessage( "exception while loading dictionary {}", file.getFileName()), e); } } @@ -200,7 +198,7 @@ private Dictionary loadDictionary(String locale, Settings nodeSettings, Environm } } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e); + logger.error(() -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e); throw e; } finally { IOUtils.close(affixStream); diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index d17740ed60004..472cb04936d64 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; @@ -307,8 +306,7 @@ private void deleteIndices(final ClusterChangedEvent event) { threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e); } @Override @@ -670,8 +668,7 @@ private void failAndRemoveShard(ShardRouting shardRouting, boolean sendShardFail // the node got closed on us, ignore it } catch (Exception inner) { inner.addSuppressed(failure); - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "[{}][{}] failed to remove shard after failure ([{}])", shardRouting.getIndexName(), shardRouting.getId(), @@ -685,15 +682,13 @@ private void failAndRemoveShard(ShardRouting shardRouting, boolean sendShardFail private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure, ClusterState state) { try { - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure); failedShardsCache.put(shardRouting.shardId(), shardRouting); shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER, state); } catch (Exception inner) { if (failure != null) inner.addSuppressed(failure); - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "[{}][{}] failed to mark shard as failed (because of [{}])", 
shardRouting.getIndexName(), shardRouting.getId(), diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index fb572d015ed43..553744e66ef04 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.flush; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -107,7 +106,7 @@ public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { @Override public void onFailure(Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e); + logger.debug(() -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e); } }); } @@ -397,7 +396,7 @@ public void handleResponse(ShardSyncedFlushResponse response) { @Override public void handleException(TransportException exp) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp); + logger.trace(() -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp); results.put(shard, new ShardSyncedFlushResponse(exp.getMessage())); countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); } @@ -453,7 +452,7 @@ public void handleResponse(PreSyncedFlushResponse response) { @Override public void handleException(TransportException exp) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp); + logger.trace(() -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp); if (countDown.countDown()) { listener.onResponse(presyncResponses); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 73764249ce128..cb49eed25f8fe 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.store.AlreadyClosedException; @@ -144,8 +143,7 @@ public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourc } protected void retryRecovery(final long recoveryId, final Throwable reason, TimeValue retryAfter, TimeValue activityTimeout) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "will retry recovery with id [{}] in [{}]", recoveryId, retryAfter), reason); retryRecovery(recoveryId, retryAfter, activityTimeout); } @@ -229,12 +227,8 @@ public RecoveryResponse newInstance() { logger.trace("recovery cancelled", e); } catch (Exception e) { if (logger.isTraceEnabled()) { - 
logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[{}][{}] Got exception on recovery", - request.shardId().getIndex().getName(), - request.shardId().id()), - e); + logger.trace(() -> new ParameterizedMessage( + "[{}][{}] Got exception on recovery", request.shardId().getIndex().getName(), request.shardId().id()), e); } Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof CancellableThreads.ExecutionCancelledException) { @@ -532,12 +526,9 @@ public void onTimeout(TimeValue timeout) { long currentVersion = future.get(); logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion, currentVersion); } catch (Exception e) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "failed waiting for cluster state with version {} (current: {})", - clusterStateVersion, - clusterService.state().getVersion()), - e); + clusterStateVersion, clusterService.state().getVersion()), e); throw ExceptionsHelper.convertToRuntime(e); } } @@ -615,16 +606,13 @@ class RecoveryRunner extends AbstractRunnable { public void onFailure(Exception e) { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { if (recoveryRef != null) { - logger.error( - (Supplier) () -> new ParameterizedMessage( - "unexpected error during recovery [{}], failing shard", recoveryId), e); + logger.error(() -> new ParameterizedMessage("unexpected error during recovery [{}], failing shard", recoveryId), e); onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(recoveryRef.target().state(), "unexpected error", e), true // be safe ); } else { - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "unexpected error during recovery, but recovery id [{}] is finished", recoveryId), e); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index 6b81d34ab5fe3..bbb02231e7a59 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.unit.TimeValue; @@ -269,7 +268,7 @@ private RecoveryMonitor(long recoveryId, long lastSeenAccessTime, TimeValue chec @Override public void onFailure(Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e); + logger.error(() -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 42b28506c0506..d6a802c30660c 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -407,12 +407,9 @@ public void phase1(final IndexCommit snapshot, final Supplier translogO RemoteTransportException exception = new 
RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null); exception.addSuppressed(targetException); - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "{} Remote file corruption during finalization of recovery on node {}. local checksum OK", - shard.shardId(), - request.targetNode()), - corruptIndexException); + shard.shardId(), request.targetNode()), corruptIndexException); throw exception; } else { throw targetException; @@ -681,13 +678,9 @@ void sendFiles(Store store, StoreFileMetaData[] files, Function) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "{} Remote file corruption on node {}, recovering {}. local checksum OK", - shardId, - request.targetNode(), - md), - corruptIndexException); + shardId, request.targetNode(), md), corruptIndexException); throw exception; } } else { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 1b1a2802b52bd..b28e992d9fd5d 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; @@ -331,8 +330,7 @@ protected void closeInternal() { try { entry.getValue().close(); } catch (Exception e) { - logger.debug( - (Supplier) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e); + logger.debug(() -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e); } iterator.remove(); } diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 294484c659863..37f67ddf102ac 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.store; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -256,7 +255,7 @@ public void handleResponse(ShardActiveResponse response) { @Override public void handleException(TransportException exp) { - logger.debug((Supplier) () -> new ParameterizedMessage("shards active request failed for {}", shardId), exp); + logger.debug(() -> new ParameterizedMessage("shards active request failed for {}", shardId), exp); if (awaitingResponses.decrementAndGet() == 0) { allNodesResponded(); } @@ -288,10 +287,10 @@ private void allNodesResponded() { try { indicesService.deleteShardStore("no longer used", shardId, currentState); } catch (Exception ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex); + logger.debug(() -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex); } }, - (source, e) -> 
logger.error((Supplier) () -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e) + (source, e) -> logger.error(() -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e) ); } @@ -340,9 +339,9 @@ public void sendResult(boolean shardActive) { try { channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode())); } catch (IOException e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); + logger.error(() -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); } catch (EsRejectedExecutionException e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); + logger.error(() -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); } } }, newState -> { diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java index f88ddcf482530..cacba54d80ad4 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java @@ -20,7 +20,6 @@ package org.elasticsearch.monitor.fs; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.DiskUsage; @@ -123,8 +122,7 @@ final FsInfo.IoStats ioStats(final Set> devicesNumbers, } catch (Exception e) { // do not fail Elasticsearch if something unexpected // happens here - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e); return null; } diff --git a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java index a0572f93e5e00..b311e559c6e91 100644 --- a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java +++ b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.common.Nullable; @@ -148,8 +147,7 @@ private void completeAndNotifyIfNeeded(@Nullable Exception failure) { logger.warn("attempt to complete task [{}] with id [{}] in the [{}] state", getAction(), getPersistentTaskId(), prevState); } else { if (failure != null) { - logger.warn((Supplier) () -> new ParameterizedMessage( - "task {} failed with an exception", getPersistentTaskId()), failure); + logger.warn(() -> new ParameterizedMessage("task {} failed with an exception", getPersistentTaskId()), failure); } try { this.failure = failure; @@ -165,9 +163,8 @@ public void 
onResponse(PersistentTasksCustomMetaData.PersistentTask persisten @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> - new ParameterizedMessage("notification for task [{}] with id [{}] failed", - getAction(), getPersistentTaskId()), e); + logger.warn(() -> new ParameterizedMessage( + "notification for task [{}] with id [{}] failed", getAction(), getPersistentTaskId()), e); } }); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index e53834d6f4655..6c410bc41a220 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -19,7 +19,6 @@ package org.elasticsearch.persistent; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -207,9 +206,9 @@ public void onResponse(CancelTasksResponse cancelTasksResponse) { @Override public void onFailure(Exception e) { // There is really nothing we can do in case of failure here - logger.warn((Supplier) () -> - new ParameterizedMessage("failed to cancel task [{}] with id [{}] and allocation id [{}]", task.getAction(), - task.getPersistentTaskId(), task.getAllocationId()), e); + logger.warn(() -> new ParameterizedMessage( + "failed to cancel task [{}] with id [{}] and allocation id [{}]", + task.getAction(), task.getPersistentTaskId(), task.getAllocationId()), e); } }); } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 2efbae5961e9d..577ccc78de7b8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -142,7 +141,7 @@ public ClusterState execute(ClusterState currentState) throws IOException { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}]", request.name), e); + logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", request.name), e); super.onFailure(source, e); } @@ -217,7 +216,7 @@ public void onResponse(VerifyResponse verifyResponse) { try { repository.endVerification(verificationToken); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e); listener.onFailure(e); return; } @@ -234,7 +233,7 @@ public void onFailure(Exception e) { repository.endVerification(verificationToken); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finish repository verification", 
repositoryName), inner); + logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner); } listener.onFailure(e); } @@ -296,14 +295,14 @@ public void applyClusterState(ClusterChangedEvent event) { } catch (RepositoryException ex) { // TODO: this catch is bogus, it means the old repo is already closed, // but we have nothing to replace it - logger.warn((Supplier) () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex); + logger.warn(() -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex); } } } else { try { repository = createRepository(repositoryMetaData); } catch (RepositoryException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex); + logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex); } } if (repository != null) { @@ -385,7 +384,7 @@ private Repository createRepository(RepositoryMetaData repositoryMetaData) { repository.start(); return repository; } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e); + logger.warn(() -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e); throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index cc1170a4841a2..ba3f9c048d08a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -81,7 +80,7 @@ public void verify(String repository, String verificationToken, final ActionList try { doVerify(repository, verificationToken, localNode); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to verify repository", repository), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to verify repository", repository), e); errors.add(new VerificationFailure(node.getId(), e)); } if (counter.decrementAndGet() == 0) { @@ -152,7 +151,7 @@ public void messageReceived(VerifyNodeRepositoryRequest request, TransportChanne try { doVerify(request.repository, request.verificationToken, localNode); } catch (Exception ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex); throw ex; } channel.sendResponse(TransportResponse.Empty.INSTANCE); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 330b2d2998627..020ea6a0f0887 100644 
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.blobstore; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooNewException; @@ -351,7 +350,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { } catch (SnapshotMissingException ex) { throw ex; } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex); + logger.warn(() -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex); } MetaData metaData = null; try { @@ -361,7 +360,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { metaData = readSnapshotMetaData(snapshotId, null, repositoryData.resolveIndices(indices), true); } } catch (IOException | SnapshotException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex); + logger.warn(() -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex); } try { // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots @@ -381,7 +380,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { try { indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID()); } catch (IOException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex); } if (metaData != null) { IndexMetaData indexMetaData = metaData.index(index); @@ -391,7 +390,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId)); } catch (SnapshotException ex) { final int finalShardId = shardId; - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex); } } } @@ -410,11 +409,11 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { // we'll ignore that and accept that cleanup didn't fully succeed. 
// since we are using UUIDs for path names, this won't be an issue for // snapshotting indices of the same name - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + + logger.debug(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder due to the directory not being empty.", metadata.name(), indexId), dnee); } catch (IOException ioe) { // a different IOException occurred while trying to delete - will just log the issue for now - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + + logger.debug(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder.", metadata.name(), indexId), ioe); } } @@ -428,10 +427,10 @@ private void deleteSnapshotBlobIgnoringErrors(final SnapshotInfo snapshotInfo, f snapshotFormat.delete(snapshotsBlobContainer, blobId); } catch (IOException e) { if (snapshotInfo != null) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", + logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", snapshotInfo.snapshotId(), blobId), e); } else { - logger.warn((Supplier) () -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e); + logger.warn(() -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e); } } } @@ -441,10 +440,10 @@ private void deleteGlobalMetaDataBlobIgnoringErrors(final SnapshotInfo snapshotI globalMetaDataFormat.delete(snapshotsBlobContainer, blobId); } catch (IOException e) { if (snapshotInfo != null) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", + logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", snapshotInfo.snapshotId(), blobId), e); } else { - logger.warn((Supplier) () -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e); + logger.warn(() -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e); } } } @@ -522,7 +521,7 @@ private MetaData readSnapshotMetaData(SnapshotId snapshotId, Version snapshotVer metaDataBuilder.put(indexMetaDataFormat.read(indexMetaDataBlobContainer, snapshotId.getUUID()), false); } catch (ElasticsearchParseException | IOException ex) { if (ignoreIndexErrors) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex); + logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex); } else { throw ex; } @@ -983,7 +982,7 @@ protected void finalize(List snapshots, int fileListGeneration, M blobContainer.deleteBlob(blobName); } catch (IOException e) { // TODO: don't catch and let the user handle it? 
- logger.debug((Supplier) () -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e); + logger.debug(() -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e); } } } @@ -1062,7 +1061,7 @@ protected Tuple buildBlobStoreIndexShardS return new Tuple<>(shardSnapshots, latest); } catch (IOException e) { final String file = SNAPSHOT_INDEX_PREFIX + latest; - logger.warn((Supplier) () -> new ParameterizedMessage("failed to read index file [{}]", file), e); + logger.warn(() -> new ParameterizedMessage("failed to read index file [{}]", file), e); } } else if (blobKeys.isEmpty() == false) { logger.debug("Could not find a readable index-N file in a non-empty shard snapshot directory [{}]", blobContainer.path()); @@ -1080,7 +1079,7 @@ protected Tuple buildBlobStoreIndexShardS snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); } } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to read commit point [{}]", name), e); + logger.warn(() -> new ParameterizedMessage("failed to read commit point [{}]", name), e); } } return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), -1); @@ -1166,7 +1165,7 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { // in a bwc compatible way. maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); + logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); } if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) { // a commit point file with the same name, size and checksum was already copied to repository @@ -1441,7 +1440,7 @@ public void restore() throws IOException { logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId); recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY; } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e); + logger.warn(() -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e); recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY; } @@ -1457,7 +1456,7 @@ public void restore() throws IOException { maybeRecalculateMetadataHash(blobContainer, fileInfo, recoveryTargetMetadata); } catch (Exception e) { // if the index is broken we might not be able to read it - logger.warn((Supplier) () -> new ParameterizedMessage("{} Can't calculate hash from blog for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); + logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blog for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); } snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata()); fileInfos.put(fileInfo.metadata().name(), fileInfo); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 0a929cc8f0bc1..e6b54a20a1e07 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ 
b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.IndicesOptions; @@ -455,7 +454,7 @@ private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e); listener.onFailure(e); } @@ -472,7 +471,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e); listener.onFailure(e); } } @@ -679,7 +678,7 @@ public ClusterTasksResult execute(final ClusterState currentState, final L @Override public void onFailure(final String source, final Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } @Override diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index df955c2e3b63d..33b4d85298799 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -340,8 +339,7 @@ public void doRun() { @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> - new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); failure.set(e); } @@ -531,7 +529,7 @@ void sendSnapshotShardUpdate(final Snapshot snapshot, final ShardId shardId, fin UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status); transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, request, INSTANCE_SAME); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); + logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index bf8edcf576704..287bb2fed22a7 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ 
b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -192,7 +191,7 @@ public List snapshots(final String repositoryName, } } catch (Exception ex) { if (ignoreUnavailable) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex); + logger.warn(() -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex); } else { throw new SnapshotException(repositoryName, snapshotId, "Snapshot could not be read", ex); } @@ -270,7 +269,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); newSnapshot = null; listener.onFailure(e); } @@ -432,7 +431,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e); removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e)); } @@ -463,7 +462,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e); + logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e); removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e)); } } @@ -511,7 +510,7 @@ private void cleanupAfterError(Exception exception) { snapshot.includeGlobalState()); } catch (Exception inner) { inner.addSuppressed(exception); - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner); + logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner); } } userCreateSnapshotListener.onFailure(e); @@ -824,7 +823,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); + logger.warn(() -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); } }); } @@ -983,7 +982,7 @@ private void endSnapshot(final SnapshotsInProgress.Entry entry, final String fai removeSnapshotFromClusterState(snapshot, snapshotInfo, null); logger.info("snapshot [{}] completed with state [{}]", snapshot, snapshotInfo.state()); } catch (Exception e) { - 
logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e); removeSnapshotFromClusterState(snapshot, null, e); } }); @@ -1032,7 +1031,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e); if (listener != null) { listener.onFailure(e); } @@ -1055,7 +1054,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS listener.onSnapshotFailure(snapshot, failure); } } catch (Exception t) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to notify listener [{}]", listener), t); + logger.warn(() -> new ParameterizedMessage("failed to notify listener [{}]", listener), t); } } if (listener != null) { @@ -1224,8 +1223,7 @@ public void onSnapshotCompletion(Snapshot completedSnapshot, SnapshotInfo snapsh listener, true); } catch (Exception ex) { - logger.warn((Supplier) () -> - new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); } } ); @@ -1244,7 +1242,7 @@ public void onSnapshotFailure(Snapshot failedSnapshot, Exception e) { listener, true); } catch (SnapshotMissingException smex) { - logger.info((Supplier) () -> new ParameterizedMessage( + logger.info(() -> new ParameterizedMessage( "Tried deleting in-progress snapshot [{}], but it " + "could not be found after failing to abort.", smex.getSnapshotName()), e); @@ -1339,7 +1337,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to remove snapshot deletion metadata", snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot deletion metadata", snapshot), e); if (listener != null) { listener.onFailure(e); } diff --git a/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java b/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java index b42b882f8f5df..79424541810c4 100644 --- a/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java +++ b/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.logging.Loggers; /** @@ -51,6 +50,6 @@ public void onResponse(Task task, Response response) { @Override public void onFailure(Task task, Throwable e) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} failed with exception", task.getId()), e); + logger.warn(() -> new ParameterizedMessage("{} failed with exception", task.getId()), e); } } diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index 16212e066bbff..80427b197239d 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -20,7 +20,6 @@ package org.elasticsearch.tasks; import 
org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; @@ -197,8 +196,7 @@ public void storeResult(Task task, Exception e try { taskResult = task.result(localNode, error); } catch (IOException ex) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex); + logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex); listener.onFailure(ex); return; } @@ -210,8 +208,7 @@ public void onResponse(Void aVoid) { @Override public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e); + logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e); listener.onFailure(e); } }); @@ -232,7 +229,7 @@ public void storeResult(Task task, Response re try { taskResult = task.result(localNode, response); } catch (IOException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("couldn't store response {}", response), ex); + logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), ex); listener.onFailure(ex); return; } @@ -245,7 +242,7 @@ public void onResponse(Void aVoid) { @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("couldn't store response {}", response), e); + logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), e); listener.onFailure(e); } }); diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java index 0c6c22671e8dc..de63994457a1f 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java @@ -185,8 +185,7 @@ public String taskResultIndexMapping() { Streams.copy(is, out); return out.toString(StandardCharsets.UTF_8.name()); } catch (Exception e) { - logger.error( - (Supplier) () -> new ParameterizedMessage( + logger.error(() -> new ParameterizedMessage( "failed to create tasks results index template [{}]", TASK_RESULT_INDEX_MAPPING_FILE), e); throw new IllegalStateException("failed to create tasks results index template [" + TASK_RESULT_INDEX_MAPPING_FILE + "]", e); } diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index c7d16d1979b20..b3bcc6b0b081f 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -20,7 +20,6 @@ package org.elasticsearch.threadpool; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Counter; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; @@ -351,11 +350,11 @@ public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, return new ReschedulingRunnable(command, interval, executor, this, (e) -> { if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", + 
logger.debug(() -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", command, executor), e); } }, - (e) -> logger.warn((Supplier) () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", + (e) -> logger.warn(() -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", command, executor), e)); } @@ -443,7 +442,7 @@ public void run() { try { runnable.run(); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to run {}", runnable.toString()), e); + logger.warn(() -> new ParameterizedMessage("failed to run {}", runnable.toString()), e); throw e; } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index aa4dec48b46bd..fb4586d201bd7 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.SetOnce; @@ -65,6 +64,7 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; /** @@ -433,7 +433,7 @@ void collectRemoteNodes(Iterator seedNodes, handshakeNode = transportService.handshake(connection, remoteProfile.getHandshakeTimeout().millis(), (c) -> remoteClusterName.get() == null ? true : c.equals(remoteClusterName.get())); } catch (IllegalStateException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("seed node {} cluster name mismatch expected " + + logger.warn(() -> new ParameterizedMessage("seed node {} cluster name mismatch expected " + "cluster name {}", connection.getNode(), remoteClusterName.get()), ex); throw ex; } @@ -475,8 +475,7 @@ void collectRemoteNodes(Iterator seedNodes, } catch (ConnectTransportException | IOException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node if (seedNodes.hasNext()) { - logger.debug((Supplier) () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", - clusterAlias), ex); + logger.debug(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); collectRemoteNodes(seedNodes, transportService, listener); } else { listener.onFailure(ex); @@ -551,8 +550,7 @@ public void handleResponse(ClusterStateResponse response) { } catch (ConnectTransportException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node // fair enough we can't connect just move on - logger.debug((Supplier) - () -> new ParameterizedMessage("failed to connect to node {}", node), ex); + logger.debug(() -> new ParameterizedMessage("failed to connect to node {}", node), ex); } } } @@ -562,9 +560,7 @@ public void handleResponse(ClusterStateResponse response) { } catch (CancellableThreads.ExecutionCancelledException ex) { listener.onFailure(ex); // we got canceled - fail the listener and step out } catch (Exception ex) { - logger.warn((Supplier) - () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", - clusterAlias), ex); + 
logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); collectRemoteNodes(seedNodes, transportService, listener); } } @@ -572,9 +568,7 @@ public void handleResponse(ClusterStateResponse response) { @Override public void handleException(TransportException exp) { assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; - logger.warn((Supplier) - () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), - exp); + logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), exp); try { IOUtils.closeWhileHandlingException(connection); } finally { diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 395be3fbaa37c..4697ee6fbdd71 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -354,11 +353,10 @@ protected void innerInnerOnResponse(Void v) { @Override protected void innerOnFailure(Exception e) { if (channel.isOpen()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); + logger.debug(() -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); failedPings.inc(); } else { - logger.trace((Supplier) () -> + logger.trace(() -> new ParameterizedMessage("[{}] failed to send ping transport message (channel closed)", node), e); } @@ -545,9 +543,7 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil throw new ConnectTransportException(node, "general node connection failure", e); } finally { if (success == false) { // close the connection if there is a failure - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "failed to connect to [{}], cleaning dangling connections", node)); + logger.trace(() -> new ParameterizedMessage("failed to connect to [{}], cleaning dangling connections", node)); IOUtils.closeWhileHandlingException(nodeChannels); } } @@ -992,27 +988,21 @@ protected void onException(TcpChannel channel, Exception e) { } if (isCloseConnectionException(e)) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "close connection exception caught on transport layer [{}], disconnecting from relevant node", - channel), - e); + logger.trace(() -> new ParameterizedMessage( + "close connection exception caught on transport layer [{}], disconnecting from relevant node", channel), e); // close the channel, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (isConnectException(e)) { - logger.trace((Supplier) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e); + logger.trace(() -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (e instanceof 
BindException) { - logger.trace((Supplier) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e); + logger.trace(() -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (e instanceof CancelledKeyException) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", - channel), - e); + logger.trace(() -> new ParameterizedMessage( + "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (e instanceof TcpTransport.HttpOnTransportException) { @@ -1034,8 +1024,7 @@ protected void innerOnFailure(Exception e) { internalSendMessage(channel, message, closeChannel); } } else { - logger.warn( - (Supplier) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); + logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); // close the channel, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } @@ -1538,7 +1527,7 @@ private void handleException(final TransportResponseHandler handler, Throwable e try { handler.handleException(rtx); } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); + logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); } }); } @@ -1581,9 +1570,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str transportChannel.sendResponse(e); } catch (IOException inner) { inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "Failed to send error message back to client for action [{}]", action), inner); + logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner); } } return action; @@ -1629,8 +1616,7 @@ public void onFailure(Exception e) { transportChannel.sendResponse(e); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "Failed to send error message back to client for action [{}]", reg.getAction()), inner); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java index 3d46c0853ec49..4ba2769edb4a2 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java @@ -63,12 +63,8 @@ public void handleException(TransportException exp) { try { channel.sendResponse(exp); } catch (IOException e) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "failed to send failure {}", - extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")"), - e); + logger.debug(() -> new ParameterizedMessage( + "failed to send failure {}", extraInfoOnError == null ? 
"" : "(" + extraInfoOnError + ")"), e); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index a54e436312732..44dac1d8eae8f 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -812,9 +812,7 @@ void onResponseSent(long requestId, String action, Exception e) { } protected void traceResponseSent(long requestId, String action, Exception e) { - tracerLog.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); + tracerLog.trace(() -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); } /** diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index 8514cb4ac2e1b..6b33b7eb3e2a8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.service; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; @@ -104,7 +103,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -172,7 +171,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -243,7 +242,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -314,7 +313,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java index d5af9dd558155..ebb15b42b7a3a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException; @@ -209,7 +208,7 @@ public void testTasksAreExecutedInOrder() throws BrokenBarrierException, Interru final TestListener listener = new TestListener() { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure: [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure: [{}]", source), e); failures.add(new Tuple<>(source, e)); updateLatch.countDown(); } diff --git a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java new file mode 100644 index 0000000000000..6c18bd0afab1b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.filter.RegexFilter; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.Arrays; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class LoggersTests extends ESTestCase { + + static class MockAppender extends AbstractAppender { + private LogEvent lastEvent; + + MockAppender(final String name) throws IllegalAccessException { + super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null); + } + + @Override + public void append(LogEvent event) { + lastEvent = event; + } + + ParameterizedMessage lastParameterizedMessage() { + return (ParameterizedMessage) lastEvent.getMessage(); + } + } + + public void testParameterizedMessageLambda() throws Exception { + final MockAppender appender = new MockAppender("trace_appender"); + appender.start(); + final Logger testLogger = Loggers.getLogger(LoggersTests.class); + Loggers.addAppender(testLogger, appender); + Loggers.setLevel(testLogger, Level.TRACE); + + Throwable ex = randomException(); + testLogger.error(() -> new ParameterizedMessage("an error message"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.ERROR)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an error message")); + + ex = randomException(); + testLogger.warn(() -> new ParameterizedMessage("a warn message: [{}]", "long gc"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.WARN)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a warn message: [long gc]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining("long gc")); + + testLogger.info(() -> new ParameterizedMessage("an info message a=[{}], b=[{}], c=[{}]", 1, 2, 3)); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.INFO)); + assertThat(appender.lastEvent.getThrown(), nullValue()); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an info message a=[1], b=[2], c=[3]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(1, 2, 3)); + + ex = randomException(); + testLogger.debug(() -> new ParameterizedMessage("a debug message options = {}", Arrays.asList("yes", "no")), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.DEBUG)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a debug message options = [yes, no]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(Arrays.asList("yes", "no"))); + + ex = randomException(); + testLogger.trace(() -> new ParameterizedMessage("a trace message; element = [{}]", new Object[]{null}), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.TRACE)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a 
trace message; element = [null]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(new Object[]{null})); + } + + private Throwable randomException(){ + return randomFrom( + new IOException("file not found"), + new UnknownHostException("unknown hostname"), + new OutOfMemoryError("out of space"), + new IllegalArgumentException("index must be between 10 and 100") + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 55f5b70e70299..2998ec8a6ba66 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; @@ -142,9 +141,7 @@ public void testAckedIndexing() throws Exception { } catch (ElasticsearchException e) { exceptedExceptions.add(e); final String docId = id; - logger.trace( - (Supplier) - () -> new ParameterizedMessage("[{}] failed id [{}] through node [{}]", name, docId, node), e); + logger.trace(() -> new ParameterizedMessage("[{}] failed id [{}] through node [{}]", name, docId, node), e); } finally { countDownLatchRef.get().countDown(); logger.trace("[{}] decreased counter : {}", name, countDownLatchRef.get().getCount()); @@ -152,9 +149,7 @@ public void testAckedIndexing() throws Exception { } catch (InterruptedException e) { // fine - semaphore interrupt } catch (AssertionError | Exception e) { - logger.info( - (Supplier) () -> new ParameterizedMessage("unexpected exception in background thread of [{}]", node), - e); + logger.info(() -> new ParameterizedMessage("unexpected exception in background thread of [{}]", node), e); } } }); diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 4225b6802ce96..43e3b2ef01b67 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -223,7 +222,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failure [{}]", source), e); + logger.warn(() -> new ParameterizedMessage("failure [{}]", source), e); } }); diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 0fdb732be9535..9e57382bb4bc8 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen; import 
org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -835,7 +834,7 @@ public void onSuccess() { @Override public void onFailure(Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected error for {}", future), e); + logger.error(() -> new ParameterizedMessage("unexpected error for {}", future), e); future.markAsFailed(e); } }); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 1fb36486c2b3f..2317d8fb0d8bf 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -21,7 +21,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; @@ -920,7 +919,7 @@ public void doRun() throws BrokenBarrierException, InterruptedException, IOExcep @Override public void onFailure(Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e); + logger.error(() -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e); errors.add(e); } }, threadName); @@ -935,7 +934,7 @@ public void onFailure(Exception e) { @Override public void onFailure(Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e); + logger.error(() -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e); errors.add(e); try { closeRetentionLock(); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index e4d73ce0f41ea..6079a9104d3db 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -110,8 +110,7 @@ public void testRandomClusterStateUpdates() { state = randomlyUpdateClusterState(state, clusterStateServiceMap, MockIndicesService::new); } catch (AssertionError error) { ClusterState finalState = state; - logger.error((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("failed to random change state. last good state: \n{}", finalState), error); + logger.error(() -> new ParameterizedMessage("failed to random change state. 
last good state: \n{}", finalState), error);
                throw error;
            }
        }
@@ -125,7 +124,7 @@ public void testRandomClusterStateUpdates() {
             try {
                 indicesClusterStateService.applyClusterState(event);
             } catch (AssertionError error) {
-                logger.error((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage(
+                logger.error(new ParameterizedMessage(
                     "failed to apply change on [{}].\n *** Previous state ***\n{}\n *** New state ***\n{}",
                     node, event.previousState(), event.state()), error);
                 throw error;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java
index 0fcf794ee1d83..e277902ace24d 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.aggregations.metrics;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.common.settings.Settings;
@@ -482,7 +481,7 @@ private void assertShardExecutionState(SearchResponse response, int expectedFail
         ShardSearchFailure[] failures = response.getShardFailures();
         if (failures.length != expectedFailures) {
             for (ShardSearchFailure failure : failures) {
-                logger.error((Supplier) () -> new ParameterizedMessage("Shard Failure: {}", failure), failure.getCause());
+                logger.error(new ParameterizedMessage("Shard Failure: {}", failure), failure.getCause());
             }
             fail("Unexpected shard failures!");
         }
diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
index 0038ef368c150..3b1002a6f68c4 100644
--- a/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
+++ b/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.search.geo;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
 import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
 import org.apache.lucene.spatial.query.SpatialArgs;
@@ -478,7 +477,7 @@ protected static boolean testRelationSupport(SpatialOperation relation) {
             final SpatialOperation finalRelation = relation;
             ESLoggerFactory
                 .getLogger(GeoFilterIT.class.getName())
-                .info((Supplier) () -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e);
+                .info(() -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e);
             return false;
         }
     }

From fede63356301a7be688007f36c59b62aedcd8961 Mon Sep 17 00:00:00 2001
From: Nicholas Knize
Date: Mon, 19 Jun 2017 14:29:12 -0500
Subject: [PATCH 21/27] Add Z value support to geo_shape

This enhancement adds Z value support (source only) to geo_shape fields. If
vertices are provided with a third dimension, the third dimension is ignored
for indexing but returned as part of source. Like before, any values beyond
the third dimension are ignored.

closes #23747
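For illustration only (this snippet is not part of the patch; the sample
coordinates and the class name are invented), a minimal sketch of how the new
behavior is expected to surface through the GeoPoint API introduced below,
assuming the patch is applied:

    import org.elasticsearch.ElasticsearchParseException;
    import org.elasticsearch.common.geo.GeoPoint;

    public class IgnoreZValueSketch {
        public static void main(String[] args) {
            // ignore_z_value = true (the default): the Z value parses successfully
            // and is dropped; only latitude and longitude are retained on the point.
            GeoPoint point = new GeoPoint().resetFromString("41.12,-71.34,250.0", true);
            System.out.println(point.lat() + ", " + point.lon()); // 41.12, -71.34

            // ignore_z_value = false: a third coordinate is rejected outright.
            try {
                new GeoPoint().resetFromString("41.12,-71.34,250.0", false);
            } catch (ElasticsearchParseException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }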
---
 .../mapping/types/geo-point.asciidoc          |   7 +
 .../mapping/types/geo-shape.asciidoc          |   6 +
 .../elasticsearch/common/geo/GeoPoint.java    |  36 +++--
 .../elasticsearch/common/geo/GeoUtils.java    |  26 ++--
 .../common/geo/builders/CircleBuilder.java    |   4 +
 .../geo/builders/CoordinatesBuilder.java      |  12 +-
 .../common/geo/builders/EnvelopeBuilder.java  |   8 ++
 .../builders/GeometryCollectionBuilder.java   |   9 ++
 .../geo/builders/LineStringBuilder.java       |   9 ++
 .../geo/builders/MultiLineStringBuilder.java  |   8 ++
 .../geo/builders/MultiPointBuilder.java       |   9 ++
 .../geo/builders/MultiPolygonBuilder.java     |   9 ++
 .../common/geo/builders/PointBuilder.java     |   5 +
 .../common/geo/builders/PolygonBuilder.java   |   9 ++
 .../common/geo/builders/ShapeBuilder.java     |  21 ++-
 .../common/geo/parsers/CoordinateNode.java    |  11 ++
 .../common/geo/parsers/GeoJsonParser.java     |  35 +++--
 .../common/geo/parsers/GeoWKTParser.java      |  87 +++++++-----
 .../common/geo/parsers/ShapeParser.java       |   3 +-
 .../index/mapper/GeoPointFieldMapper.java     |  61 ++++++--
 .../index/mapper/GeoShapeFieldMapper.java     |  37 ++++-
 .../support/ValuesSourceConfig.java           |   2 +-
 .../completion/context/GeoContextMapping.java |   4 +-
 .../common/geo/GeoJsonShapeParserTests.java   | 117 ++++++++++++++--
 .../common/geo/GeoWKTShapeParserTests.java    | 132 ++++++++++++++++--
 .../common/geo/ShapeBuilderTests.java         |  45 ++++++
 .../mapper/GeoPointFieldMapperTests.java      |  75 ++++++++++
 .../mapper/GeoShapeFieldMapperTests.java      |  37 +++++
 .../index/search/geo/GeoUtilsTests.java       |  29 +++-
 29 files changed, 739 insertions(+), 114 deletions(-)

diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc
index 7d92bb3b2e7c7..83e2064e5b8cc 100644
--- a/docs/reference/mapping/types/geo-point.asciidoc
+++ b/docs/reference/mapping/types/geo-point.asciidoc
@@ -105,6 +105,13 @@ The following parameters are accepted by `geo_point` fields:
     If `true`, malformed geo-points are ignored. If `false` (default),
     malformed geo-points throw an exception and reject the whole document.
 
+<<ignore_z_value,`ignore_z_value`>>::
+
+    If `true` (default) three dimensional points will be accepted (stored in source)
+    but only latitude and longitude values will be indexed; the third dimension is
+    ignored. If `false`, geo-points containing more than latitude and longitude
+    (two dimensions) throw an exception and reject the whole document.
+
 ==== Using geo-points in scripts
 
 When accessing the value of a geo-point in a script, the value is returned as
diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc
index 23caaf6a8ec5c..26974f1f867de 100644
--- a/docs/reference/mapping/types/geo-shape.asciidoc
+++ b/docs/reference/mapping/types/geo-shape.asciidoc
@@ -91,6 +91,12 @@
 false (default), malformed GeoJSON and WKT shapes throw an exception and reject the
 entire document.
 | `false`
 
+|`ignore_z_value` |If `true` (default) three dimensional points will be accepted (stored in source)
+but only latitude and longitude values will be indexed; the third dimension is ignored. If `false`,
+geo-points containing more than latitude and longitude (two dimensions) throw an exception
+and reject the whole document.
+| `true`
+
 |=======================================================================
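As a concrete illustration of the new `ignore_z_value` option (not part of the
upstream docs; the field name "location" is invented), a mapping that opts out
of the lenient default could be built with the same XContentBuilder style the
tests in this patch use:

    import org.elasticsearch.common.Strings;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class IgnoreZValueMappingSketch {
        public static void main(String[] args) throws Exception {
            // Builds the JSON for a geo_shape field that rejects coordinates
            // carrying a Z value instead of silently dropping it.
            XContentBuilder mapping = XContentFactory.jsonBuilder()
                .startObject()
                    .startObject("properties")
                        .startObject("location")
                            .field("type", "geo_shape")
                            .field("ignore_z_value", false)
                        .endObject()
                    .endObject()
                .endObject();
            System.out.println(Strings.toString(mapping));
        }
    }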
diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
index 5905695fb73fe..e43c9e9a8e3cc 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
@@ -25,15 +25,17 @@
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.util.BitUtil;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
 
 import java.io.IOException;
 import java.util.Arrays;
 
 import static org.elasticsearch.common.geo.GeoHashUtils.mortonEncode;
 import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
+import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE;
 
 public final class GeoPoint implements ToXContentFragment {
 
@@ -79,14 +81,24 @@ public GeoPoint resetLon(double lon) {
     }
 
     public GeoPoint resetFromString(String value) {
-        int comma = value.indexOf(',');
-        if (comma != -1) {
-            lat = Double.parseDouble(value.substring(0, comma).trim());
-            lon = Double.parseDouble(value.substring(comma + 1).trim());
-        } else {
-            resetFromGeoHash(value);
+        return resetFromString(value, false);
+    }
+
+    public GeoPoint resetFromString(String value, final boolean ignoreZValue) {
+        if (value.contains(",")) {
+            String[] vals = value.split(",");
+            if (vals.length > 3) {
+                throw new ElasticsearchParseException("failed to parse [{}], expected 2 or 3 coordinates "
+                    + "but found: [{}]", value, vals.length);
+            }
+            double lat = Double.parseDouble(vals[0].trim());
+            double lon = Double.parseDouble(vals[1].trim());
+            if (vals.length > 2) {
+                GeoPoint.assertZValue(ignoreZValue, Double.parseDouble(vals[2].trim()));
+            }
+            return reset(lat, lon);
         }
-        return this;
+        return resetFromGeoHash(value);
     }
 
     public GeoPoint resetFromIndexHash(long hash) {
@@ -193,4 +205,12 @@ public static GeoPoint fromGeohash(long geohashLong) {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         return builder.latlon(lat, lon);
     }
+
+    public static double assertZValue(final boolean ignoreZValue, double zValue) {
+        if (ignoreZValue == false) {
+            throw new ElasticsearchParseException("Exception parsing coordinates: found Z value [{}] but [{}] "
+                + "parameter is [{}]", zValue, IGNORE_Z_VALUE, ignoreZValue);
+        }
+        return zValue;
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java
index aed72f502bfe9..655b259c81074 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java
@@ -24,6 +24,7 @@
 import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
 import org.apache.lucene.util.SloppyMath;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
@@ -345,6 +346,11 @@ public static GeoPoint parseGeoPoint(XContentParser parser) throws IOException,
         return parseGeoPoint(parser, new GeoPoint());
     }
 
+
+
public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) throws IOException, ElasticsearchParseException { + return parseGeoPoint(parser, point, false); + } + /** * Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms: * @@ -359,7 +365,8 @@ public static GeoPoint parseGeoPoint(XContentParser parser) throws IOException, * @param point A {@link GeoPoint} that will be reset by the values parsed * @return new {@link GeoPoint} parsed from the parse */ - public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) throws IOException, ElasticsearchParseException { + public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { double lat = Double.NaN; double lon = Double.NaN; String geohash = null; @@ -438,7 +445,7 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) thro } else if(element == 2) { lat = parser.doubleValue(); } else { - throw new ElasticsearchParseException("only two values allowed"); + GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); } } else { throw new ElasticsearchParseException("numeric value expected"); @@ -446,25 +453,12 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) thro } return point.reset(lat, lon); } else if(parser.currentToken() == Token.VALUE_STRING) { - String data = parser.text(); - return parseGeoPoint(data, point); + return point.resetFromString(parser.text(), ignoreZValue); } else { throw new ElasticsearchParseException("geo_point expected"); } } - /** parse a {@link GeoPoint} from a String */ - public static GeoPoint parseGeoPoint(String data, GeoPoint point) { - int comma = data.indexOf(','); - if(comma > 0) { - double lat = Double.parseDouble(data.substring(0, comma).trim()); - double lon = Double.parseDouble(data.substring(comma + 1).trim()); - return point.reset(lat, lon); - } else { - return point.resetFromGeoHash(data); - } - } - /** Returns the maximum distance/radius (in meters) from the point 'center' before overlapping */ public static double maxRadialDistanceMeters(final double centerLat, final double centerLon) { if (Math.abs(centerLat) == MAX_LAT) { diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index ecc33b94ae4eb..024ec91e88765 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -173,6 +173,10 @@ public String toWKT() { throw new UnsupportedOperationException("The WKT spec does not support CIRCLE geometry"); } + public int numDimensions() { + return Double.isNaN(center.z) ? 
2 : 3; + } + @Override public int hashCode() { return Objects.hash(center, radius, unit.ordinal()); diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java index 43393d5e08630..2eaf5f26dc78b 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.geo.builders; import com.vividsolutions.jts.geom.Coordinate; +import org.elasticsearch.ElasticsearchException; import java.util.ArrayList; import java.util.Arrays; @@ -41,7 +42,16 @@ public class CoordinatesBuilder { * @return this */ public CoordinatesBuilder coordinate(Coordinate coordinate) { - this.points.add(coordinate); + int expectedDims; + int actualDims; + if (points.isEmpty() == false + && (expectedDims = Double.isNaN(points.get(0).z) ? 2 : 3) != (actualDims = Double.isNaN(coordinate.z) ? 2 : 3)) { + throw new ElasticsearchException("unable to add coordinate to CoordinateBuilder: " + + "coordinate dimensions do not match. Expected [{}] but found [{}]", expectedDims, actualDims); + + } else { + this.points.add(coordinate); + } return this; } diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 4949c3633470d..34da7e7fc2f6c 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -45,6 +45,9 @@ public class EnvelopeBuilder extends ShapeBuilder { public EnvelopeBuilder(Coordinate topLeft, Coordinate bottomRight) { Objects.requireNonNull(topLeft, "topLeft of envelope cannot be null"); Objects.requireNonNull(bottomRight, "bottomRight of envelope cannot be null"); + if (Double.isNaN(topLeft.z) != Double.isNaN(bottomRight.z)) { + throw new IllegalArgumentException("expected same number of dimensions for topLeft and bottomRight"); + } this.topLeft = topLeft; this.bottomRight = bottomRight; } @@ -114,6 +117,11 @@ public GeoShapeType type() { return TYPE; } + @Override + public int numDimensions() { + return Double.isNaN(topLeft.z) ? 
2 : 3;
+    }
+
     @Override
     public int hashCode() {
         return Objects.hash(topLeft, bottomRight);
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
index 84052939da48b..b9c23842a5a8c 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
@@ -159,6 +159,15 @@ public GeoShapeType type() {
         return TYPE;
     }
 
+    @Override
+    public int numDimensions() {
+        if (shapes == null || shapes.isEmpty()) {
+            throw new IllegalStateException("unable to get number of dimensions, "
+                + "GeometryCollection has not yet been initialized");
+        }
+        return shapes.get(0).numDimensions();
+    }
+
     @Override
     public Shape build() {
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
index c595c126f7a62..a888ee0867cb2 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
@@ -91,6 +91,15 @@ public GeoShapeType type() {
         return TYPE;
     }
 
+    @Override
+    public int numDimensions() {
+        if (coordinates == null || coordinates.isEmpty()) {
+            throw new IllegalStateException("unable to get number of dimensions, "
+                + "LineString has not yet been initialized");
+        }
+        return Double.isNaN(coordinates.get(0).z) ? 2 : 3;
+    }
+
     @Override
     public JtsGeometry build() {
         Coordinate[] coordinates = this.coordinates.toArray(new Coordinate[this.coordinates.size()]);
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
index 34a8960f69c53..13f9968864c32 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
@@ -101,6 +101,14 @@ protected StringBuilder contentToWKT() {
         return sb;
     }
 
+    public int numDimensions() {
+        if (lines == null || lines.isEmpty()) {
+            throw new IllegalStateException("unable to get number of dimensions, "
+                + "LineStrings have not yet been initialized");
+        }
+        return lines.get(0).numDimensions();
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
index ae38126f87bac..03d7683c8e113 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
@@ -80,4 +80,13 @@ public XShapeCollection build() {
     public GeoShapeType type() {
         return TYPE;
     }
+
+    @Override
+    public int numDimensions() {
+        if (coordinates == null || coordinates.isEmpty()) {
+            throw new IllegalStateException("unable to get number of dimensions, "
+                + "MultiPoint has not yet been initialized");
+        }
+        return Double.isNaN(coordinates.get(0).z) ? 
2 : 3; + } } diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index aa577887e00d2..168d57c1764a7 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -153,6 +153,15 @@ public GeoShapeType type() { return TYPE; } + @Override + public int numDimensions() { + if (polygons == null || polygons.isEmpty()) { + throw new IllegalStateException("unable to get number of dimensions, " + + "Polygons have not yet been initialized"); + } + return polygons.get(0).numDimensions(); + } + @Override public Shape build() { diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index 029ac14955a3a..0380e0be07392 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -93,4 +93,9 @@ public Point build() { public GeoShapeType type() { return TYPE; } + + @Override + public int numDimensions() { + return Double.isNaN(coordinates.get(0).z) ? 2 : 3; + } } diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index b0b37dbafa9a3..dade127456c8c 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -283,6 +283,15 @@ public GeoShapeType type() { return TYPE; } + @Override + public int numDimensions() { + if (shell == null) { + throw new IllegalStateException("unable to get number of dimensions, " + + "Polygon has not yet been initialized"); + } + return shell.numDimensions(); + } + protected static Polygon polygon(GeometryFactory factory, Coordinate[][] polygon) { LinearRing shell = factory.createLinearRing(polygon[0]); LinearRing[] holes; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index 106c312a3bc93..cd0ecdc4aeb88 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -25,6 +25,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Assertions; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.GeoWKTParser; @@ -109,7 +110,13 @@ protected ShapeBuilder(StreamInput in) throws IOException { } protected static Coordinate readFromStream(StreamInput in) throws IOException { - return new Coordinate(in.readDouble(), in.readDouble()); + double x = in.readDouble(); + double y = in.readDouble(); + Double z = null; + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + z = in.readOptionalDouble(); + } + return z == null ? 
new Coordinate(x, y) : new Coordinate(x, y, z); } @Override @@ -123,6 +130,9 @@ public void writeTo(StreamOutput out) throws IOException { protected static void writeCoordinateTo(Coordinate coordinate, StreamOutput out) throws IOException { out.writeDouble(coordinate.x); out.writeDouble(coordinate.y); + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + out.writeOptionalDouble(Double.isNaN(coordinate.z) ? null : coordinate.z); + } } @SuppressWarnings("unchecked") @@ -217,6 +227,9 @@ protected static Coordinate shift(Coordinate coordinate, double dateline) { */ public abstract GeoShapeType type(); + /** tracks number of dimensions for this shape */ + public abstract int numDimensions(); + /** * Calculate the intersection of a line segment and a vertical dateline. * @@ -429,7 +442,11 @@ protected static final boolean debugEnabled() { } protected static XContentBuilder toXContent(XContentBuilder builder, Coordinate coordinate) throws IOException { - return builder.startArray().value(coordinate.x).value(coordinate.y).endArray(); + builder.startArray().value(coordinate.x).value(coordinate.y); + if (Double.isNaN(coordinate.z) == false) { + builder.value(coordinate.z); + } + return builder.endArray(); } /** diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java index eb6322196373f..98f8f57d39734 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.geo.parsers; import com.vividsolutions.jts.geom.Coordinate; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -61,6 +62,16 @@ public boolean isEmpty() { return (coordinate == null && (children == null || children.isEmpty())); } + protected int numDimensions() { + if (isEmpty()) { + throw new ElasticsearchException("attempting to get number of dimensions on an empty coordinate node"); + } + if (coordinate != null) { + return Double.isNaN(coordinate.z) ? 2 : 3; + } + return children.get(0).numDimensions(); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (children == null) { diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java index 01f26498e9c69..31107d763913e 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java @@ -21,6 +21,7 @@ import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.builders.CircleBuilder; import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder; @@ -49,6 +50,7 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s ShapeBuilder.Orientation requestedOrientation = (shapeMapper == null) ? ShapeBuilder.Orientation.RIGHT : shapeMapper.fieldType().orientation(); Explicit coerce = (shapeMapper == null) ? 
GeoShapeFieldMapper.Defaults.COERCE : shapeMapper.coerce(); + Explicit ignoreZValue = (shapeMapper == null) ? GeoShapeFieldMapper.Defaults.IGNORE_Z_VALUE : shapeMapper.ignoreZValue(); String malformedException = null; @@ -68,7 +70,12 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s } } else if (ShapeParser.FIELD_COORDINATES.match(fieldName, parser.getDeprecationHandler())) { parser.nextToken(); - coordinateNode = parseCoordinates(parser); + CoordinateNode tempNode = parseCoordinates(parser, ignoreZValue.value()); + if (coordinateNode != null && tempNode.numDimensions() != coordinateNode.numDimensions()) { + throw new ElasticsearchParseException("Exception parsing coordinates: " + + "number of dimensions do not match"); + } + coordinateNode = tempNode; } else if (ShapeParser.FIELD_GEOMETRIES.match(fieldName, parser.getDeprecationHandler())) { if (shapeType == null) { shapeType = GeoShapeType.GEOMETRYCOLLECTION; @@ -136,36 +143,46 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s * Thrown if an error occurs while reading from the * XContentParser */ - private static CoordinateNode parseCoordinates(XContentParser parser) throws IOException { + private static CoordinateNode parseCoordinates(XContentParser parser, boolean ignoreZValue) throws IOException { XContentParser.Token token = parser.nextToken(); // Base cases if (token != XContentParser.Token.START_ARRAY && token != XContentParser.Token.END_ARRAY && token != XContentParser.Token.VALUE_NULL) { - return new CoordinateNode(parseCoordinate(parser)); + return new CoordinateNode(parseCoordinate(parser, ignoreZValue)); } else if (token == XContentParser.Token.VALUE_NULL) { throw new IllegalArgumentException("coordinates cannot contain NULL values)"); } List nodes = new ArrayList<>(); while (token != XContentParser.Token.END_ARRAY) { - nodes.add(parseCoordinates(parser)); + CoordinateNode node = parseCoordinates(parser, ignoreZValue); + if (nodes.isEmpty() == false && nodes.get(0).numDimensions() != node.numDimensions()) { + throw new ElasticsearchParseException("Exception parsing coordinates: number of dimensions do not match"); + } + nodes.add(node); token = parser.nextToken(); } return new CoordinateNode(nodes); } - private static Coordinate parseCoordinate(XContentParser parser) throws IOException { + private static Coordinate parseCoordinate(XContentParser parser, boolean ignoreZValue) throws IOException { double lon = parser.doubleValue(); parser.nextToken(); double lat = parser.doubleValue(); XContentParser.Token token = parser.nextToken(); - while (token == XContentParser.Token.VALUE_NUMBER) { - token = parser.nextToken(); + // alt (for storing purposes only - future use includes 3d shapes) + double alt = Double.NaN; + if (token == XContentParser.Token.VALUE_NUMBER) { + alt = GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); + parser.nextToken(); + } + // do not support > 3 dimensions + if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { + throw new ElasticsearchParseException("geo coordinates greater than 3 dimensions are not supported"); } - // todo support z/alt - return new Coordinate(lon, lat); + return new Coordinate(lon, lat, alt); } /** diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index 2a8110c5f4dc2..74e463c723a5a 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ 
b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -20,6 +20,7 @@ import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoShapeType; import java.io.StringReader; @@ -35,6 +36,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import java.io.IOException; import java.io.StreamTokenizer; @@ -52,7 +54,7 @@ public class GeoWKTParser { public static final String LPAREN = "("; public static final String RPAREN = ")"; public static final String COMMA = ","; - private static final String NAN = "NaN"; + public static final String NAN = "NaN"; private static final String NUMBER = ""; private static final String EOF = "END-OF-STREAM"; @@ -61,16 +63,23 @@ public class GeoWKTParser { // no instance private GeoWKTParser() {} - public static ShapeBuilder parse(XContentParser parser) + public static ShapeBuilder parse(XContentParser parser, final GeoShapeFieldMapper shapeMapper) throws IOException, ElasticsearchParseException { - return parseExpectedType(parser, null); + return parseExpectedType(parser, null, shapeMapper); } - /** throws an exception if the parsed geometry type does not match the expected shape type */ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType) throws IOException, ElasticsearchParseException { + return parseExpectedType(parser, shapeType, null); + } + + /** throws an exception if the parsed geometry type does not match the expected shape type */ + public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType, + final GeoShapeFieldMapper shapeMapper) + throws IOException, ElasticsearchParseException { StringReader reader = new StringReader(parser.text()); try { + boolean ignoreZValue = (shapeMapper != null && shapeMapper.ignoreZValue().value() == true); // setup the tokenizer; configured to read words w/o numbers StreamTokenizer tokenizer = new StreamTokenizer(reader); tokenizer.resetSyntax(); @@ -83,7 +92,7 @@ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoSha tokenizer.wordChars('.', '.'); tokenizer.whitespaceChars(0, ' '); tokenizer.commentChar('#'); - ShapeBuilder builder = parseGeometry(tokenizer, shapeType); + ShapeBuilder builder = parseGeometry(tokenizer, shapeType, ignoreZValue); checkEOF(tokenizer); return builder; } finally { @@ -92,7 +101,7 @@ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoSha } /** parse geometry from the stream tokenizer */ - private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType) + private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { final GeoShapeType type = GeoShapeType.forName(nextWord(stream)); if (shapeType != null && shapeType != GeoShapeType.GEOMETRYCOLLECTION) { @@ -102,21 +111,21 @@ private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType s } switch (type) { case POINT: - return parsePoint(stream); + return parsePoint(stream, ignoreZValue); case MULTIPOINT: - return parseMultiPoint(stream); + return parseMultiPoint(stream, ignoreZValue); case LINESTRING: - return parseLine(stream); + return 
parseLine(stream, ignoreZValue); case MULTILINESTRING: - return parseMultiLine(stream); + return parseMultiLine(stream, ignoreZValue); case POLYGON: - return parsePolygon(stream); + return parsePolygon(stream, ignoreZValue); case MULTIPOLYGON: - return parseMultiPolygon(stream); + return parseMultiPolygon(stream, ignoreZValue); case ENVELOPE: return parseBBox(stream); case GEOMETRYCOLLECTION: - return parseGeometryCollection(stream); + return parseGeometryCollection(stream, ignoreZValue); default: throw new IllegalArgumentException("Unknown geometry type: " + type); } @@ -137,24 +146,25 @@ private static EnvelopeBuilder parseBBox(StreamTokenizer stream) throws IOExcept return new EnvelopeBuilder(new Coordinate(minLon, maxLat), new Coordinate(maxLon, minLat)); } - private static PointBuilder parsePoint(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static PointBuilder parsePoint(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } PointBuilder pt = new PointBuilder(nextNumber(stream), nextNumber(stream)); if (isNumberNext(stream) == true) { - nextNumber(stream); + GeoPoint.assertZValue(ignoreZValue, nextNumber(stream)); } nextCloser(stream); return pt; } - private static List parseCoordinateList(StreamTokenizer stream) + private static List parseCoordinateList(StreamTokenizer stream, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { CoordinatesBuilder coordinates = new CoordinatesBuilder(); boolean isOpenParen = false; if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) { - coordinates.coordinate(parseCoordinate(stream)); + coordinates.coordinate(parseCoordinate(stream, ignoreZValue)); } if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) { @@ -164,7 +174,7 @@ private static List parseCoordinateList(StreamTokenizer stream) while (nextCloserOrComma(stream).equals(COMMA)) { isOpenParen = false; if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) { - coordinates.coordinate(parseCoordinate(stream)); + coordinates.coordinate(parseCoordinate(stream, ignoreZValue)); } if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) { throw new ElasticsearchParseException("expected: " + RPAREN + " but found: " + tokenString(stream), stream.lineno()); @@ -173,77 +183,82 @@ private static List parseCoordinateList(StreamTokenizer stream) return coordinates.build(); } - private static Coordinate parseCoordinate(StreamTokenizer stream) + private static Coordinate parseCoordinate(StreamTokenizer stream, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { final double lon = nextNumber(stream); final double lat = nextNumber(stream); Double z = null; if (isNumberNext(stream)) { - z = nextNumber(stream); + z = GeoPoint.assertZValue(ignoreZValue, nextNumber(stream)); } return z == null ? 
new Coordinate(lon, lat) : new Coordinate(lon, lat, z); } - private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } - return new MultiPointBuilder(parseCoordinateList(stream)); + return new MultiPointBuilder(parseCoordinateList(stream, ignoreZValue)); } - private static LineStringBuilder parseLine(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static LineStringBuilder parseLine(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } - return new LineStringBuilder(parseCoordinateList(stream)); + return new LineStringBuilder(parseCoordinateList(stream, ignoreZValue)); } - private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } MultiLineStringBuilder builder = new MultiLineStringBuilder(); - builder.linestring(parseLine(stream)); + builder.linestring(parseLine(stream, ignoreZValue)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.linestring(parseLine(stream)); + builder.linestring(parseLine(stream, ignoreZValue)); } return builder; } - private static PolygonBuilder parsePolygon(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static PolygonBuilder parsePolygon(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - PolygonBuilder builder = new PolygonBuilder(parseLine(stream), ShapeBuilder.Orientation.RIGHT); + PolygonBuilder builder = new PolygonBuilder(parseLine(stream, ignoreZValue), ShapeBuilder.Orientation.RIGHT); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.hole(parseLine(stream)); + builder.hole(parseLine(stream, ignoreZValue)); } return builder; } - private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream)); + MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream, ignoreZValue)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.polygon(parsePolygon(stream)); + builder.polygon(parsePolygon(stream, ignoreZValue)); } return builder; } - private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream) + private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape( - 
parseGeometry(stream, GeoShapeType.GEOMETRYCOLLECTION)); + parseGeometry(stream, GeoShapeType.GEOMETRYCOLLECTION, ignoreZValue)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.shape(parseGeometry(stream, null)); + builder.shape(parseGeometry(stream, null, ignoreZValue)); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java index 0ee3333c4802c..e7ec489191762 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import java.io.IOException; @@ -52,7 +53,7 @@ static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper shapeMapper } if (parser.currentToken() == XContentParser.Token.START_OBJECT) { return GeoJsonParser.parse(parser, shapeMapper); } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { - return GeoWKTParser.parse(parser); + return GeoWKTParser.parse(parser, shapeMapper); } throw new ElasticsearchParseException("shape must be an object consisting of type and coordinates"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 7b9eb5f067a67..bc9f8b660be01 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.TermQuery; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.settings.Settings; @@ -57,11 +58,13 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper public static class Names { public static final String IGNORE_MALFORMED = "ignore_malformed"; + public static final ParseField IGNORE_Z_VALUE = new ParseField("ignore_z_value"); } public static class Defaults { public static final Explicit IGNORE_MALFORMED = new Explicit<>(false, false); public static final GeoPointFieldType FIELD_TYPE = new GeoPointFieldType(); + public static final Explicit IGNORE_Z_VALUE = new Explicit<>(true, false); static { FIELD_TYPE.setTokenized(false); @@ -73,6 +76,7 @@ public static class Defaults { public static class Builder extends FieldMapper.Builder { protected Boolean ignoreMalformed; + private Boolean ignoreZValue; public Builder(String name) { super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); @@ -94,19 +98,32 @@ protected Explicit ignoreMalformed(BuilderContext context) { return GeoPointFieldMapper.Defaults.IGNORE_MALFORMED; } + protected Explicit ignoreZValue(BuilderContext context) { + if (ignoreZValue != null) { + return new Explicit<>(ignoreZValue, true); + } + return Defaults.IGNORE_Z_VALUE; + } + + public Builder ignoreZValue(final boolean ignoreZValue) { + this.ignoreZValue = ignoreZValue; + return this; + } + public GeoPointFieldMapper build(BuilderContext context, String 
simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, Explicit ignoreMalformed, - CopyTo copyTo) { + Explicit ignoreZValue, CopyTo copyTo) { setupFieldType(context); return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, - ignoreMalformed, copyTo); + ignoreMalformed, ignoreZValue, copyTo); } @Override public GeoPointFieldMapper build(BuilderContext context) { return build(context, name, fieldType, defaultFieldType, context.indexSettings(), - multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo); + multiFieldsBuilder.build(this, context), ignoreMalformed(context), + ignoreZValue(context), copyTo); } } @@ -125,6 +142,10 @@ public Mapper.Builder parse(String name, Map node, ParserContext if (propName.equals(Names.IGNORE_MALFORMED)) { builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, Names.IGNORE_MALFORMED, propNode, parserContext)); iterator.remove(); + } else if (propName.equals(Names.IGNORE_Z_VALUE.getPreferredName())) { + builder.ignoreZValue(TypeParsers.nodeBooleanValue(propName, Names.IGNORE_Z_VALUE.getPreferredName(), + propNode, parserContext)); + iterator.remove(); } } @@ -133,12 +154,14 @@ public Mapper.Builder parse(String name, Map node, ParserContext } protected Explicit ignoreMalformed; + protected Explicit ignoreZValue; public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, Explicit ignoreMalformed, - CopyTo copyTo) { + Explicit ignoreZValue, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); this.ignoreMalformed = ignoreMalformed; + this.ignoreZValue = ignoreZValue; } @Override @@ -148,6 +171,9 @@ protected void doMerge(Mapper mergeWith) { if (gpfmMergeWith.ignoreMalformed.explicit()) { this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; } + if (gpfmMergeWith.ignoreZValue.explicit()) { + this.ignoreZValue = gpfmMergeWith.ignoreZValue; + } } @Override @@ -264,12 +290,18 @@ public Mapper parse(ParseContext context) throws IOException { double lon = context.parser().doubleValue(); token = context.parser().nextToken(); double lat = context.parser().doubleValue(); - while ((token = context.parser().nextToken()) != XContentParser.Token.END_ARRAY); + token = context.parser().nextToken(); + Double alt = Double.NaN; + if (token == XContentParser.Token.VALUE_NUMBER) { + alt = GeoPoint.assertZValue(ignoreZValue.value(), context.parser().doubleValue()); + } else if (token != XContentParser.Token.END_ARRAY) { + throw new ElasticsearchParseException("[{}] field type does not accept > 3 dimensions", CONTENT_TYPE); + } parse(context, sparse.reset(lat, lon)); } else { while (token != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { - parsePointFromString(context, sparse, context.parser().text()); + parse(context, sparse.resetFromString(context.parser().text(), ignoreZValue.value())); } else { try { parse(context, GeoUtils.parseGeoPoint(context.parser(), sparse)); @@ -284,7 +316,7 @@ public Mapper parse(ParseContext context) throws IOException { } } } else if (token == XContentParser.Token.VALUE_STRING) { - parsePointFromString(context, sparse, context.parser().text()); + parse(context, sparse.resetFromString(context.parser().text(), ignoreZValue.value())); } else if (token != XContentParser.Token.VALUE_NULL) { try { parse(context, 
GeoUtils.parseGeoPoint(context.parser(), sparse)); @@ -300,19 +332,18 @@ public Mapper parse(ParseContext context) throws IOException { return null; } - private void parsePointFromString(ParseContext context, GeoPoint sparse, String point) throws IOException { - if (point.indexOf(',') < 0) { - parse(context, sparse.resetFromGeoHash(point)); - } else { - parse(context, sparse.resetFromString(point)); - } - } - @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); if (includeDefaults || ignoreMalformed.explicit()) { builder.field(GeoPointFieldMapper.Names.IGNORE_MALFORMED, ignoreMalformed.value()); } + if (includeDefaults || ignoreZValue.explicit()) { + builder.field(Names.IGNORE_Z_VALUE.getPreferredName(), ignoreZValue.value()); + } + } + + public Explicit ignoreZValue() { + return ignoreZValue; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 4057ab9492403..b80831298cb87 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -101,6 +101,7 @@ public static class Defaults { public static final double LEGACY_DISTANCE_ERROR_PCT = 0.025d; public static final Explicit COERCE = new Explicit<>(false, false); public static final Explicit IGNORE_MALFORMED = new Explicit<>(false, false); + public static final Explicit IGNORE_Z_VALUE = new Explicit<>(true, false); public static final MappedFieldType FIELD_TYPE = new GeoShapeFieldType(); @@ -121,6 +122,7 @@ public static class Builder extends FieldMapper.Builder ignoreMalformed(BuilderContext context) { return Defaults.IGNORE_MALFORMED; } + protected Explicit ignoreZValue(BuilderContext context) { + if (ignoreZValue != null) { + return new Explicit<>(ignoreZValue, true); + } + return Defaults.IGNORE_Z_VALUE; + } + + public Builder ignoreZValue(final boolean ignoreZValue) { + this.ignoreZValue = ignoreZValue; + return this; + } + @Override public GeoShapeFieldMapper build(BuilderContext context) { GeoShapeFieldType geoShapeFieldType = (GeoShapeFieldType)fieldType; @@ -175,8 +189,8 @@ public GeoShapeFieldMapper build(BuilderContext context) { } setupFieldType(context); - return new GeoShapeFieldMapper(name, fieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), - multiFieldsBuilder.build(this, context), copyTo); + return new GeoShapeFieldMapper(name, fieldType, ignoreMalformed(context), coerce(context), ignoreZValue(context), + context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); } } @@ -213,6 +227,10 @@ public Mapper.Builder parse(String name, Map node, ParserContext } else if (Names.COERCE.equals(fieldName)) { builder.coerce(TypeParsers.nodeBooleanValue(fieldName, Names.COERCE, fieldNode, parserContext)); iterator.remove(); + } else if (GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName().equals(fieldName)) { + builder.ignoreZValue(TypeParsers.nodeBooleanValue(fieldName, GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName(), + fieldNode, parserContext)); + iterator.remove(); } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName) && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) { boolean pointsOnly = TypeParsers.nodeBooleanValue(fieldName, Names.STRATEGY_POINTS_ONLY, fieldNode, 
parserContext); @@ -444,12 +462,15 @@ public Query termQuery(Object value, QueryShardContext context) { protected Explicit coerce; protected Explicit ignoreMalformed; + protected Explicit ignoreZValue; public GeoShapeFieldMapper(String simpleName, MappedFieldType fieldType, Explicit ignoreMalformed, - Explicit coerce, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + Explicit coerce, Explicit ignoreZValue, Settings indexSettings, + MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, Defaults.FIELD_TYPE, indexSettings, multiFields, copyTo); this.coerce = coerce; this.ignoreMalformed = ignoreMalformed; + this.ignoreZValue = ignoreZValue; } @Override @@ -513,6 +534,9 @@ protected void doMerge(Mapper mergeWith) { if (gsfm.ignoreMalformed.explicit()) { this.ignoreMalformed = gsfm.ignoreMalformed; } + if (gsfm.ignoreZValue.explicit()) { + this.ignoreZValue = gsfm.ignoreZValue; + } } @Override @@ -546,6 +570,9 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, if (includeDefaults || ignoreMalformed.explicit()) { builder.field(IGNORE_MALFORMED, ignoreMalformed.value()); } + if (includeDefaults || ignoreZValue.explicit()) { + builder.field(GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName(), ignoreZValue.value()); + } } public Explicit coerce() { @@ -556,6 +583,10 @@ public Explicit ignoreMalformed() { return ignoreMalformed; } + public Explicit ignoreZValue() { + return ignoreZValue; + } + @Override protected String contentType() { return CONTENT_TYPE; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index c51bb83741ac4..d8414c7b31f94 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -263,7 +263,7 @@ public VS toValuesSource(QueryShardContext context) throws IOException { return (VS) MissingValues.replaceMissing((ValuesSource.Numeric) vs, missing); } else if (vs instanceof ValuesSource.GeoPoint) { // TODO: also support the structured formats of geo points - final GeoPoint missing = GeoUtils.parseGeoPoint(missing().toString(), new GeoPoint()); + final GeoPoint missing = new GeoPoint(missing().toString()); return (VS) MissingValues.replaceMissing((ValuesSource.GeoPoint) vs, missing); } else { // Should not happen diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index b464d6069e79e..c4f7d8a500064 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -133,7 +133,7 @@ protected XContentBuilder toInnerXContent(XContentBuilder builder, Params params *
 *   <li>String/Object/Array: <pre>"GEO POINT"</pre></li>
 * </ul>
 *
- * see {@link GeoUtils#parseGeoPoint(String, GeoPoint)} for GEO POINT
+ * see {@code GeoPoint(String)} for GEO POINT
  */
 @Override
 public Set parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException {
@@ -249,7 +249,7 @@ protected GeoQueryContext fromXContent(XContentParser parser) throws IOException
 *
 * <ul>
 *   <li>String: <pre>GEO POINT</pre></li>
  • * - * see {@link GeoUtils#parseGeoPoint(String, GeoPoint)} for GEO POINT + * see {@code GeoPoint(String)} for GEO POINT */ @Override public List toInternalQueryContexts(List queryContexts) { diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java index 98a7fe514543f..0a0b9d6583bbb 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java @@ -28,11 +28,18 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.geo.parsers.ShapeParser; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions; import org.locationtech.spatial4j.exception.InvalidShapeException; import org.locationtech.spatial4j.shape.Circle; @@ -135,8 +142,9 @@ public void testParseMultiDimensionShapes() throws IOException { .startArray("coordinates").value(100.0).value(0.0).value(15.0).value(18.0).endArray() .endObject(); - Point expectedPt = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0)); - assertGeometryEquals(new JtsPoint(expectedPt, SPATIAL_CONTEXT), pointGeoJson); + XContentParser parser = createParser(pointGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); // multi dimension linestring XContentBuilder lineGeoJson = XContentFactory.jsonBuilder() @@ -148,13 +156,9 @@ public void testParseMultiDimensionShapes() throws IOException { .endArray() .endObject(); - List lineCoordinates = new ArrayList<>(); - lineCoordinates.add(new Coordinate(100, 0)); - lineCoordinates.add(new Coordinate(101, 1)); - - LineString expectedLS = GEOMETRY_FACTORY.createLineString( - lineCoordinates.toArray(new Coordinate[lineCoordinates.size()])); - assertGeometryEquals(jtsGeom(expectedLS), lineGeoJson); + parser = createParser(lineGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); } @Override @@ -231,6 +235,61 @@ public void testParsePolygon() throws IOException { assertGeometryEquals(jtsGeom(expected), polygonGeoJson); } + public void testParse3DPolygon() throws IOException { + XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .startArray().value(101.0).value(1.0).value(10.0).endArray() + .startArray().value(101.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .endArray() + .endArray() + .endObject(); + + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0, 10)); + shellCoordinates.add(new 
Coordinate(101, 0, 10)); + shellCoordinates.add(new Coordinate(101, 1, 10)); + shellCoordinates.add(new Coordinate(100, 1, 10)); + shellCoordinates.add(new Coordinate(100, 0, 10)); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()])); + Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null); + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(true).build(mockBuilderContext); + XContentParser parser = createParser(polygonGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertEquals(jtsGeom(expected), ShapeParser.parse(parser, mapperBuilder).build()); + } + + public void testInvalidDimensionalPolygon() throws IOException { + XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .startArray().value(101.0).value(1.0).endArray() + .startArray().value(101.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .endArray() + .endArray() + .endObject(); + XContentParser parser = createParser(polygonGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); + } + public void testParseInvalidPoint() throws IOException { // test case 1: create an invalid point object with multipoint data format XContentBuilder invalidPoint1 = XContentFactory.jsonBuilder() @@ -326,6 +385,46 @@ public void testParseInvalidMultiPolygon() throws IOException { ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class); } + public void testParseInvalidDimensionalMultiPolygon() throws IOException { + // test invalid multipolygon (an "accidental" polygon with inner rings outside outer ring) + String multiPolygonGeoJson = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .field("type", "MultiPolygon") + .startArray("coordinates") + .startArray()//first poly (without holes) + .startArray() + .startArray().value(102.0).value(2.0).endArray() + .startArray().value(103.0).value(2.0).endArray() + .startArray().value(103.0).value(3.0).endArray() + .startArray().value(102.0).value(3.0).endArray() + .startArray().value(102.0).value(2.0).endArray() + .endArray() + .endArray() + .startArray()//second poly (with hole) + .startArray() + .startArray().value(100.0).value(0.0).endArray() + .startArray().value(101.0).value(0.0).endArray() + .startArray().value(101.0).value(1.0).endArray() + .startArray().value(100.0).value(1.0).endArray() + .startArray().value(100.0).value(0.0).endArray() + .endArray() + .startArray()//hole + .startArray().value(100.2).value(0.8).endArray() + .startArray().value(100.2).value(0.2).value(10.0).endArray() + .startArray().value(100.8).value(0.2).endArray() + .startArray().value(100.8).value(0.8).endArray() + .startArray().value(100.2).value(0.8).endArray() + .endArray() + .endArray() + .endArray() + .endObject()); + + 
XContentParser parser = createParser(JsonXContent.jsonXContent, multiPolygonGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); + } + + public void testParseOGCPolygonWithoutHoles() throws IOException { // test 1: ccw poly not crossing dateline String polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon") diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 7249277338322..0a113549d1664 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -25,7 +25,11 @@ import com.vividsolutions.jts.geom.Point; import com.vividsolutions.jts.geom.Polygon; import org.apache.lucene.geo.GeoTestUtil; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.EnvelopeBuilder; import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder; @@ -37,9 +41,14 @@ import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.geo.parsers.GeoWKTParser; +import org.elasticsearch.common.geo.parsers.ShapeParser; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.locationtech.spatial4j.exception.InvalidShapeException; import org.locationtech.spatial4j.shape.Rectangle; @@ -80,7 +89,7 @@ private void assertExpected(Shape expected, ShapeBuilder builder) throws IOExcep assertGeometryEquals(expected, xContentBuilder); } - private void assertMalformed(Shape expected, ShapeBuilder builder) throws IOException { + private void assertMalformed(ShapeBuilder builder) throws IOException { XContentBuilder xContentBuilder = toWKTContent(builder, true); assertValidException(xContentBuilder, ElasticsearchParseException.class); } @@ -91,7 +100,7 @@ public void testParsePoint() throws IOException { Coordinate c = new Coordinate(p.lon(), p.lat()); Point expected = GEOMETRY_FACTORY.createPoint(c); assertExpected(new JtsPoint(expected, SPATIAL_CONTEXT), new PointBuilder().coordinate(c)); - assertMalformed(new JtsPoint(expected, SPATIAL_CONTEXT), new PointBuilder().coordinate(c)); + assertMalformed(new PointBuilder().coordinate(c)); } @Override @@ -107,7 +116,7 @@ public void testParseMultiPoint() throws IOException { } ShapeCollection expected = shapeCollection(shapes); assertExpected(expected, new MultiPointBuilder(coordinates)); - assertMalformed(expected, new MultiPointBuilder(coordinates)); + assertMalformed(new MultiPointBuilder(coordinates)); } private List randomLineStringCoords() { @@ -142,7 +151,7 @@ public void testParseMultiLineString() throws IOException { MultiLineString expected = 
GEOMETRY_FACTORY.createMultiLineString( lineStrings.toArray(new LineString[lineStrings.size()])); assertExpected(jtsGeom(expected), builder); - assertMalformed(jtsGeom(expected), builder); + assertMalformed(builder); } @Override @@ -153,7 +162,7 @@ public void testParsePolygon() throws IOException { LinearRing shell = GEOMETRY_FACTORY.createLinearRing(coords); Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null); assertExpected(jtsGeom(expected), builder); - assertMalformed(jtsGeom(expected), builder); + assertMalformed(builder); } @Override @@ -173,16 +182,16 @@ public void testParseMultiPolygon() throws IOException { } Shape expected = shapeCollection(shapes); assertExpected(expected, builder); - assertMalformed(expected, builder); + assertMalformed(builder); } public void testParsePolygonWithHole() throws IOException { // add 3d point to test ISSUE #10501 List shellCoordinates = new ArrayList<>(); - shellCoordinates.add(new Coordinate(100, 0, 15.0)); + shellCoordinates.add(new Coordinate(100, 0)); shellCoordinates.add(new Coordinate(101, 0)); shellCoordinates.add(new Coordinate(101, 1)); - shellCoordinates.add(new Coordinate(100, 1, 10.0)); + shellCoordinates.add(new Coordinate(100, 1)); shellCoordinates.add(new Coordinate(100, 0)); List holeCoordinates = new ArrayList<>(); @@ -203,7 +212,110 @@ public void testParsePolygonWithHole() throws IOException { Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes); assertExpected(jtsGeom(expected), polygonWithHole); - assertMalformed(jtsGeom(expected), polygonWithHole); + assertMalformed(polygonWithHole); + } + + public void testParseMixedDimensionPolyWithHole() throws IOException { + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0)); + shellCoordinates.add(new Coordinate(101, 0)); + shellCoordinates.add(new Coordinate(101, 1)); + shellCoordinates.add(new Coordinate(100, 1)); + shellCoordinates.add(new Coordinate(100, 0)); + + // add 3d point to test ISSUE #10501 + List holeCoordinates = new ArrayList<>(); + holeCoordinates.add(new Coordinate(100.2, 0.2, 15.0)); + holeCoordinates.add(new Coordinate(100.8, 0.2)); + holeCoordinates.add(new Coordinate(100.8, 0.8)); + holeCoordinates.add(new Coordinate(100.2, 0.8, 10.0)); + holeCoordinates.add(new Coordinate(100.2, 0.2)); + + PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates)); + builder.hole(new LineStringBuilder(holeCoordinates)); + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT()); + XContentParser parser = createParser(xContentBuilder); + parser.nextToken(); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(false).build(mockBuilderContext); + + // test store z disabled + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> ShapeParser.parse(parser, mapperBuilder)); + assertThat(e, hasToString(containsString("but [ignore_z_value] parameter is [false]"))); + } + + public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { + List shellCoordinates = new 
ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0)); + shellCoordinates.add(new Coordinate(101, 0)); + shellCoordinates.add(new Coordinate(101, 1)); + shellCoordinates.add(new Coordinate(100, 1)); + shellCoordinates.add(new Coordinate(100, 0)); + + // add 3d point to test ISSUE #10501 + List holeCoordinates = new ArrayList<>(); + holeCoordinates.add(new Coordinate(100.2, 0.2, 15.0)); + holeCoordinates.add(new Coordinate(100.8, 0.2)); + holeCoordinates.add(new Coordinate(100.8, 0.8)); + holeCoordinates.add(new Coordinate(100.2, 0.8, 10.0)); + holeCoordinates.add(new Coordinate(100.2, 0.2)); + + PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates)); + builder.hole(new LineStringBuilder(holeCoordinates)); + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT()); + XContentParser parser = createParser(xContentBuilder); + parser.nextToken(); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(true).build(mockBuilderContext); + + // test store z disabled + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> ShapeParser.parse(parser, mapperBuilder)); + assertThat(e, hasToString(containsString("unable to add coordinate to CoordinateBuilder: coordinate dimensions do not match"))); + } + + public void testParsePolyWithStoredZ() throws IOException { + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0, 0)); + shellCoordinates.add(new Coordinate(101, 0, 0)); + shellCoordinates.add(new Coordinate(101, 1, 0)); + shellCoordinates.add(new Coordinate(100, 1, 5)); + shellCoordinates.add(new Coordinate(100, 0, 5)); + + PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates)); + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT()); + XContentParser parser = createParser(xContentBuilder); + parser.nextToken(); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(true).build(mockBuilderContext); + + ShapeBuilder shapeBuilder = ShapeParser.parse(parser, mapperBuilder); + assertEquals(shapeBuilder.numDimensions(), 3); } public void testParseSelfCrossingPolygon() throws IOException { @@ -235,7 +347,7 @@ public void testParseEnvelope() throws IOException { EnvelopeBuilder builder = new EnvelopeBuilder(new Coordinate(r.minLon, r.maxLat), new Coordinate(r.maxLon, r.minLat)); Rectangle expected = SPATIAL_CONTEXT.makeRectangle(r.minLon, r.maxLon, r.minLat, r.maxLat); assertExpected(expected, builder); - assertMalformed(expected, builder); + assertMalformed(builder); } public void testInvalidGeometryType() throws IOException { 
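Taken together, the WKT tests above pin down three z-value behaviors: a mapper built with `ignoreZValue(false)` rejects any z coordinate, `ignoreZValue(true)` accepts them but still requires every point in a ring to have the same dimensionality, and a uniformly 3D ring round-trips with `numDimensions() == 3`. A condensed, standalone sketch of the builder API those tests exercise (illustrative only; the class name and `main` method are hypothetical):

["source","java"]
--------------------------------------------------
import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.geo.builders.CoordinatesBuilder;
import org.elasticsearch.common.geo.builders.PolygonBuilder;

public class ZValueWktSketch {
    public static void main(String[] args) {
        // Build a closed ring in which every coordinate carries a z value;
        // mixing 2D and 3D points in one ring is what the negative tests
        // above reject with "coordinate dimensions do not match".
        PolygonBuilder poly = new PolygonBuilder(new CoordinatesBuilder()
                .coordinate(new Coordinate(100, 0, 5))
                .coordinate(new Coordinate(101, 0, 5))
                .coordinate(new Coordinate(101, 1, 5))
                .coordinate(new Coordinate(100, 1, 5))
                .coordinate(new Coordinate(100, 0, 5)));

        // toWKT() serializes the z values, e.g. POLYGON ((100.0 0.0 5.0, ...)),
        // which is the kind of input the parser tests feed back through
        // ShapeParser.parse(parser, mapperBuilder).
        System.out.println(poly.toWKT());
    }
}
--------------------------------------------------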
diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index d1f7d5601a6cc..22877b8ff3b3c 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -653,4 +653,49 @@ public void testInvalidShapeWithConsecutiveDuplicatePoints() { Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build()); assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); } + + public void testPolygon3D() { + String expected = "{\n" + + " \"type\" : \"polygon\",\n" + + " \"orientation\" : \"right\",\n" + + " \"coordinates\" : [\n" + + " [\n" + + " [\n" + + " -45.0,\n" + + " 30.0,\n" + + " 100.0\n" + + " ],\n" + + " [\n" + + " 45.0,\n" + + " 30.0,\n" + + " 75.0\n" + + " ],\n" + + " [\n" + + " 45.0,\n" + + " -30.0,\n" + + " 77.0\n" + + " ],\n" + + " [\n" + + " -45.0,\n" + + " -30.0,\n" + + " 101.0\n" + + " ],\n" + + " [\n" + + " -45.0,\n" + + " 30.0,\n" + + " 110.0\n" + + " ]\n" + + " ]\n" + + " ]\n" + + "}"; + + PolygonBuilder pb = new PolygonBuilder(new CoordinatesBuilder() + .coordinate(new Coordinate(-45, 30, 100)) + .coordinate(new Coordinate(45, 30, 75)) + .coordinate(new Coordinate(45, -30, 77)) + .coordinate(new Coordinate(-45, -30, 101)) + .coordinate(new Coordinate(-45, 30, 110))); + + assertEquals(expected, pb.toString()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index 40fc0e81a920c..03cc183b906d3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -34,14 +34,17 @@ import org.elasticsearch.test.geo.RandomGeoGenerator; import org.hamcrest.CoreMatchers; +import java.io.IOException; import java.util.Collection; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { @@ -121,6 +124,43 @@ public void testLatLonInOneValue() throws Exception { assertThat(doc.rootDoc().getField("point"), notNullValue()); } + public void testLatLonStringWithZValue() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("point").field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), true); + String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", + new CompressedXContent(mapping)); + + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() 
+ .field("point", "1.2,1.3,10.0") + .endObject()), + XContentType.JSON)); + + assertThat(doc.rootDoc().getField("point"), notNullValue()); + } + + public void testLatLonStringWithZValueException() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("point").field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), false); + String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", + new CompressedXContent(mapping)); + + SourceToParse source = SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("point", "1.2,1.3,10.0") + .endObject()), + XContentType.JSON); + + Exception e = expectThrows(MapperParsingException.class, () -> defaultMapper.parse(source)); + assertThat(e.getCause().getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + } + public void testLatLonInOneValueStored() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); @@ -230,6 +270,41 @@ public void testLonLatArrayArrayStored() throws Exception { assertThat(doc.rootDoc().getFields("point").length, CoreMatchers.equalTo(4)); } + /** + * Test that accept_z_value parameter correctly parses + */ + public void testIgnoreZValue() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), "true") + .endObject().endObject() + .endObject().endObject()); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoPointFieldMapper.class)); + + boolean ignoreZValue = ((GeoPointFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(true)); + + // explicit false accept_z_value test + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), "false") + .endObject().endObject() + .endObject().endObject()); + + defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoPointFieldMapper.class)); + + ignoreZValue = ((GeoPointFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(false)); + } + public void testMultiField() throws Exception { int numDocs = randomIntBetween(10, 100); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("pin").startObject("properties").startObject("location") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java index fb143cc3898e4..201e749cd22e7 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java @@ -35,6 +35,7 @@ import java.io.IOException; import java.util.Collection; +import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -138,6 +139,42 @@ public void testCoerceParsing() throws IOException { assertThat(coerce, equalTo(false)); } + + /** + * Test that accept_z_value parameter correctly parses + */ + public void testIgnoreZValue() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field(IGNORE_Z_VALUE.getPreferredName(), "true") + .endObject().endObject() + .endObject().endObject()); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); + + boolean ignoreZValue = ((GeoShapeFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(true)); + + // explicit false accept_z_value test + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field(IGNORE_Z_VALUE.getPreferredName(), "false") + .endObject().endObject() + .endObject().endObject()); + + defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); + + ignoreZValue = ((GeoShapeFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(false)); + } + /** * Test that ignore_malformed parameter correctly parses */ diff --git a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java index 204b71e82a192..4ddb80c4b0633 100644 --- a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java @@ -410,6 +410,19 @@ public void testParseGeoPoint() throws IOException { } } + public void testParseGeoPointStringZValueError() throws IOException { + double lat = randomDouble() * 180 - 90 + randomIntBetween(-1000, 1000) * 180; + double lon = randomDouble() * 360 - 180 + randomIntBetween(-1000, 1000) * 360; + double alt = randomDouble() * 1000; + XContentBuilder json = jsonBuilder().startObject().field("foo", lat + "," + lon + "," + alt).endObject(); + XContentParser parser = createParser(json); + while (parser.currentToken() != Token.VALUE_STRING) { + parser.nextToken(); + } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser, new GeoPoint(), false)); + assertThat(e.getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + } + public void testParseGeoPointGeohash() throws IOException { for (int i = 0; i < 100; i++) { int geoHashLength = randomIntBetween(1, GeoHashUtils.PRECISION); @@ -509,7 +522,21 @@ 
public void testParseGeoPointArrayTooManyValues() throws IOException { parser.nextToken(); } Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("only two values allowed")); + assertThat(e.getMessage(), is("Exception parsing coordinates: found Z value [0.0] but [ignore_z_value] parameter is [false]")); + } + + public void testParseGeoPointArray3D() throws IOException { + double lat = 90.0; + double lon = -180.0; + double elev = 0.0; + XContentBuilder json = jsonBuilder().startObject().startArray("foo").value(lon).value(lat).value(elev).endArray().endObject(); + XContentParser parser = createParser(json); + while (parser.currentToken() != Token.START_ARRAY) { + parser.nextToken(); + } + GeoPoint point = GeoUtils.parseGeoPoint(parser, new GeoPoint(), true); + assertThat(point.lat(), equalTo(lat)); + assertThat(point.lon(), equalTo(lon)); } public void testParseGeoPointArrayWrongType() throws IOException { From 8c59e43ac72bd0da411da23471643eb13e270dac Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 23 Mar 2018 10:11:50 -0400 Subject: [PATCH 22/27] Docs: HighLevelRestClient#multiSearch (#29144) Adds docs for `HighLevelRestClient#multiSearch`. Unlike the `multiGet` docs these are much more sparse because multi-search doesn't support setting many options on the `MultiSearchRequest` and instead just wraps a list of `SearchRequest`s. Closes #28389 --- .../documentation/SearchDocumentationIT.java | 124 +++++++++++++----- .../high-level/search/multi-search.asciidoc | 90 +++++++++++++ .../high-level/search/search.asciidoc | 4 +- .../high-level/supported-apis.asciidoc | 2 + 4 files changed, 186 insertions(+), 34 deletions(-) create mode 100644 docs/java-rest/high-level/search/multi-search.asciidoc diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 22421dec6d9b9..96d962c3ac553 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -27,6 +27,8 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; @@ -85,45 +87,15 @@ import static org.hamcrest.Matchers.greaterThan; /** - * This class is used to generate the Java High Level REST Client Search API documentation. - *
<p>
    - * You need to wrap your code between two tags like: - * // tag::example - * // end::example - *
<p>
    - * Where example is your tag name. - *
<p>
    - * Then in the documentation, you can extract what is between tag and end tags with - * ["source","java",subs="attributes,callouts,macros"] - * -------------------------------------------------- - * include-tagged::{doc-tests}/SearchDocumentationIT.java[example] - * -------------------------------------------------- - *
<p>
    - * The column width of the code block is 84. If the code contains a line longer - * than 84, the line will be cut and a horizontal scroll bar will be displayed. - * (the code indentation of the tag is not included in the width) + * Documentation for search APIs in the high level java client. + * Code wrapped in {@code tag} and {@code end} tags is included in the docs. */ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { @SuppressWarnings({"unused", "unchecked"}) public void testSearch() throws Exception { + indexSearchTestData(); RestHighLevelClient client = highLevelClient(); - { - BulkRequest request = new BulkRequest(); - request.add(new IndexRequest("posts", "doc", "1") - .source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user", - Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value"))); - request.add(new IndexRequest("posts", "doc", "2") - .source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user", - Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value"))); - request.add(new IndexRequest("posts", "doc", "3") - .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user", - Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value"))); - request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); - assertSame(RestStatus.OK, bulkResponse.status()); - assertFalse(bulkResponse.hasFailures()); - } { // tag::search-request-basic SearchRequest searchRequest = new SearchRequest(); // <1> @@ -715,4 +687,90 @@ public void onFailure(Exception e) { assertTrue(succeeded); } } + + public void testMultiSearch() throws Exception { + indexSearchTestData(); + RestHighLevelClient client = highLevelClient(); + { + // tag::multi-search-request-basic + MultiSearchRequest request = new MultiSearchRequest(); // <1> + SearchRequest firstSearchRequest = new SearchRequest(); // <2> + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(QueryBuilders.matchQuery("user", "kimchy")); + firstSearchRequest.source(searchSourceBuilder); + request.add(firstSearchRequest); // <3> + SearchRequest secondSearchRequest = new SearchRequest(); // <4> + searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(QueryBuilders.matchQuery("user", "luca")); + secondSearchRequest.source(searchSourceBuilder); + request.add(secondSearchRequest); + // end::multi-search-request-basic + // tag::multi-search-execute + MultiSearchResponse response = client.multiSearch(request); + // end::multi-search-execute + // tag::multi-search-response + MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // <1> + assertNull(firstResponse.getFailure()); // <2> + SearchResponse searchResponse = firstResponse.getResponse(); // <3> + assertEquals(3, searchResponse.getHits().getTotalHits()); + MultiSearchResponse.Item secondResponse = response.getResponses()[1]; // <4> + assertNull(secondResponse.getFailure()); + searchResponse = secondResponse.getResponse(); + assertEquals(1, searchResponse.getHits().getTotalHits()); + // end::multi-search-response + + // tag::multi-search-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(MultiSearchResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // 
<2> + } + }; + // end::multi-search-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::multi-search-execute-async + client.multiSearchAsync(request, listener); // <1> + // end::multi-search-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + { + // tag::multi-search-request-index + MultiSearchRequest request = new MultiSearchRequest(); + request.add(new SearchRequest("posts") // <1> + .types("doc")); // <2> + // end::multi-search-request-index + MultiSearchResponse response = client.multiSearch(request); + MultiSearchResponse.Item firstResponse = response.getResponses()[0]; + assertNull(firstResponse.getFailure()); + SearchResponse searchResponse = firstResponse.getResponse(); + assertEquals(3, searchResponse.getHits().getTotalHits()); + } + } + + private void indexSearchTestData() throws IOException { + BulkRequest request = new BulkRequest(); + request.add(new IndexRequest("posts", "doc", "1") + .source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user", + Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value"))); + request.add(new IndexRequest("posts", "doc", "2") + .source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user", + Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value"))); + request.add(new IndexRequest("posts", "doc", "3") + .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user", + Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value"))); + request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + BulkResponse bulkResponse = highLevelClient().bulk(request); + assertSame(RestStatus.OK, bulkResponse.status()); + assertFalse(bulkResponse.hasFailures()); + } } diff --git a/docs/java-rest/high-level/search/multi-search.asciidoc b/docs/java-rest/high-level/search/multi-search.asciidoc new file mode 100644 index 0000000000000..1b76f8976666a --- /dev/null +++ b/docs/java-rest/high-level/search/multi-search.asciidoc @@ -0,0 +1,90 @@ +[[java-rest-high-multi-search]] +=== Multi-Search API + +The `multiSearch` API executes multiple <> +requests in a single http request in parallel. + +[[java-rest-high-multi-search-request]] +==== Multi-Search Request + +The `MultiSearchRequest` is built empty and you add all of the searches that +you wish to execute to it: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-request-basic] +-------------------------------------------------- +<1> Create an empty `MultiSearchRequest`. +<2> Create an empty `SearchRequest` and populate it just like you +would for a regular <>. +<3> Add the `SearchRequest` to the `MultiSearchRequest`. +<4> Build a second `SearchRequest` and add it to the `MultiSearchRequest`. + +===== Optional arguments + +The `SearchRequest`s inside of `MultiSearchRequest` support all of +<>'s optional arguments. 
+For example: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-request-indices-types] +-------------------------------------------------- +<1> Restricts the request to an index +<2> Limits the request to a type + +[[java-rest-high-multi-search-sync]] +==== Synchronous Execution + +The `multiSearch` method executes `MultiSearchRequest`s synchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-execute] +-------------------------------------------------- + +[[java-rest-high-multi-search-async]] +==== Asynchronous Execution + +The `multiSearchAsync` method executes `MultiSearchRequest`s asynchronously, +calling the provided `ActionListener` when the response is ready. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-async] +-------------------------------------------------- +<1> The `MultiSearchRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `MultiSearchResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. +<2> Called when the whole `SearchRequest` fails. + +==== MultiSearchResponse + +The `MultiSearchResponse` that is returned by executing the `multiSearch` +a `MultiSearchResponse.Item` for each `SearchRequest` in the +`MultiSearchRequest`. Each `MultiSearchResponse.Item` contains an +exception in `getFailure` if the request failed or a +<> in `getResponse` if +the request succeeded: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-response] +-------------------------------------------------- +<1> The item for the first search. +<2> It succeeded so `getFailure` returns null. +<3> And there is a <> in +`getResponse`. +<4> The item for the second search. diff --git a/docs/java-rest/high-level/search/search.asciidoc b/docs/java-rest/high-level/search/search.asciidoc index 2e8dda64286f4..af81775a90072 100644 --- a/docs/java-rest/high-level/search/search.asciidoc +++ b/docs/java-rest/high-level/search/search.asciidoc @@ -20,6 +20,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-request-basic] <3> Add a `match_all` query to the `SearchSourceBuilder`. <4> Add the `SearchSourceBuilder` to the `SeachRequest`. +[[java-rest-high-search-request-optional]] ===== Optional arguments Let's first look at some of the optional arguments of a `SearchRequest`: @@ -140,7 +141,7 @@ The `SearchSourceBuilder` allows to add one or more `SortBuilder` instances. 
The include-tagged::{doc-tests}/SearchDocumentationIT.java[search-source-sorting] -------------------------------------------------- <1> Sort descending by `_score` (the default) -<2> Also sort ascending by `_id` field +<2> Also sort ascending by `_id` field ===== Source filtering @@ -268,6 +269,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-listener] <1> Called when the execution is successfully completed. <2> Called when the whole `SearchRequest` fails. +[[java-rest-high-search-response]] ==== SearchResponse The `SearchResponse` that is returned by executing the search provides details diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index de5a3d6b6a656..0330b1903c5bf 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -31,9 +31,11 @@ The Java High Level REST Client supports the following Search APIs: * <> * <> * <> +* <> include::search/search.asciidoc[] include::search/scroll.asciidoc[] +include::search/multi-search.asciidoc[] == Miscellaneous APIs From 3b8a8867c482ec265685d702e2aa43d14d36c040 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 23 Mar 2018 15:53:36 +0100 Subject: [PATCH 23/27] [DOCS] Unregister repository instead of deleting it (#29206) Relates to #15426 --- docs/reference/modules/snapshots.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index b8883173b9890..ea3f99debb94e 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -401,7 +401,7 @@ created the snapshotting process will be aborted and all files created as part o cleaned. Therefore, the delete snapshot operation can be used to cancel long running snapshot operations that were started by mistake. -A repository can be deleted using the following command: +A repository can be unregistered using the following command: [source,sh] ----------------------------------- @@ -410,7 +410,7 @@ DELETE /_snapshot/my_fs_backup // CONSOLE // TEST[continued] -When a repository is deleted, Elasticsearch only removes the reference to the location where the repository is storing +When a repository is unregistered, Elasticsearch only removes the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. 
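The same unregistration can also be issued from Java through the low-level REST client; a minimal sketch, with host, port, and repository name as illustrative values:

["source","java"]
--------------------------------------------------
import java.io.IOException;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class UnregisterRepositorySketch {
    public static void main(String[] args) throws IOException {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // DELETE /_snapshot/{repository} removes only the repository
            // reference; the snapshot files themselves stay in place.
            Response response = client.performRequest("DELETE", "/_snapshot/my_fs_backup");
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------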
[float] From 16bffc739463950b94b1f362e146c273ca3dc4ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Petr=20Nov=C3=A1k?= Date: Fri, 23 Mar 2018 16:30:01 +0100 Subject: [PATCH 24/27] Docs: Link C++ client lib elasticlient (#28949) elasticlient is simple library for simplified work with Elasticsearch in C++ --- docs/community-clients/index.asciidoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index afa8b0f5879b7..0fd5c3a483f50 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -8,6 +8,7 @@ Besides the link:/guide[officially supported Elasticsearch clients], there are a number of clients that have been contributed by the community for various languages: * <> +* <> * <> * <> * <> @@ -35,6 +36,10 @@ a number of clients that have been contributed by the community for various lang * https://www.b4x.com/android/forum/threads/server-jelasticsearch-search-and-text-analytics.73335/ B4J client based on the official Java REST client. +[[cpp]] +== C++ +* https://github.com/seznam/elasticlient[elasticlient]: simple library for simplified work with Elasticsearch in C++ + [[clojure]] == Clojure From 687fe860ac5a38a13391960112a6f603d30eb9b8 Mon Sep 17 00:00:00 2001 From: Jean-Charles Legras Date: Fri, 23 Mar 2018 15:49:20 +0100 Subject: [PATCH 25/27] Docs: Update docs/index_.asciidoc (#29172) Use `_doc` in the routing example instead of `tweet` to agree with the text and line up with the other examples. --- docs/reference/docs/index_.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index ff42adf91336b..fe1ebf4739632 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -249,7 +249,7 @@ on a per-operation basis using the `routing` parameter. For example: [source,js] -------------------------------------------------- -POST twitter/tweet?routing=kimchy +POST twitter/_doc?routing=kimchy { "user" : "kimchy", "post_date" : "2009-11-15T14:12:12", From d400a08788406885a55d86a079985aa047a68c4b Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Fri, 23 Mar 2018 11:07:24 -0500 Subject: [PATCH 26/27] [DOCS] Remove ignore_z_value parameter link Removes invalid ignore_z_value parameter link in geo-point.asciidoc. --- docs/reference/mapping/types/geo-point.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 83e2064e5b8cc..ae81773e6a0a2 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -105,7 +105,7 @@ The following parameters are accepted by `geo_point` fields: If `true`, malformed geo-points are ignored. If `false` (default), malformed geo-points throw an exception and reject the whole document. -<>:: +`ignore_z_value`:: If `true` (default) three dimension points will be accepted (stored in source) but only latitude and longitude values will be indexed; the third dimension is From afe95a7738c86ddcbd1c0368c2ca3983b06ce0f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 23 Mar 2018 18:04:32 +0100 Subject: [PATCH 27/27] [Docs] Add rank_eval size parameter k (#29218) The rank_eval documentation was missing an explanation of the parameter `k` that controls the number of top hits that are used in the ranking evaluation. 
Closes #29205 --- docs/reference/search/rank-eval.asciidoc | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index 53c6ac9cf6030..eace381bfaa48 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -1,14 +1,16 @@ [[search-rank-eval]] == Ranking Evaluation API +experimental[The ranking evaluation API is experimental and may be changed or removed completely in a future release, +as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort +approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.] + The ranking evaluation API allows to evaluate the quality of ranked search results over a set of typical search queries. Given this set of queries and a list or manually rated documents, the `_rank_eval` endpoint calculates and returns typical information retrieval metrics like _mean reciprocal rank_, _precision_ or _discounted cumulative gain_. -experimental[The ranking evaluation API is new and may change in non-backwards compatible ways in the future, even on minor versions updates.] - [float] === Overview @@ -41,7 +43,7 @@ GET /my_index/_rank_eval { "requests": [ ... ], <1> "metric": { <2> - "reciprocal_rank": { ... } <3> + "mean_reciprocal_rank": { ... } <3> } } ------------------------------ @@ -85,7 +87,7 @@ The request section contains several search requests typical to your application <3> a list of document ratings, each entry containing the documents `_index` and `_id` together with the rating of the documents relevance with regards to this search request -A document `rating` can be any integer value that expresses the relevance of the document on a user defined scale. For some of the metrics, just giving a binary rating (e.g. `0` for irrelevant and `1` for relevant) will be sufficient, other metrics can use a more fine grained scale. +A document `rating` can be any integer value that expresses the relevance of the document on a user defined scale. For some of the metrics, just giving a binary rating (e.g. `0` for irrelevant and `1` for relevant) will be sufficient, other metrics can use a more fine grained scale. [float] === Template based ranking evaluation @@ -158,6 +160,7 @@ GET /twitter/_rank_eval }], "metric": { "precision": { + "k" : 20, "relevant_rating_threshold": 1, "ignore_unlabeled": false } @@ -172,7 +175,9 @@ The `precision` metric takes the following optional parameters [cols="<,<",options="header",] |======================================================================= |Parameter |Description -|`relevant_rating_threshold` |Sets the rating threshold above which documents are considered to be +|`k` |sets the maximum number of documents retrieved per query. This value will act in place of the usual `size` parameter +in the query. Defaults to 10. +|`relevant_rating_threshold` |sets the rating threshold above which documents are considered to be "relevant". Defaults to `1`. |`ignore_unlabeled` |controls how unlabeled documents in the search results are counted. If set to 'true', unlabeled documents are ignored and neither count as relevant or irrelevant. Set to 'false' (the default), they are treated as irrelevant. 
@@ -198,6 +203,7 @@ GET /twitter/_rank_eval }], "metric": { "mean_reciprocal_rank": { + "k" : 20, "relevant_rating_threshold" : 1 } } @@ -211,6 +217,8 @@ The `mean_reciprocal_rank` metric takes the following optional parameters [cols="<,<",options="header",] |======================================================================= |Parameter |Description +|`k` |sets the maximum number of documents retrieved per query. This value will act in place of the usual `size` parameter +in the query. Defaults to 10. |`relevant_rating_threshold` |Sets the rating threshold above which documents are considered to be "relevant". Defaults to `1`. |======================================================================= @@ -234,6 +242,7 @@ GET /twitter/_rank_eval }], "metric": { "dcg": { + "k" : 20, "normalize": false } } @@ -247,6 +256,8 @@ The `dcg` metric takes the following optional parameters: [cols="<,<",options="header",] |======================================================================= |Parameter |Description +|`k` |sets the maximum number of documents retrieved per query. This value will act in place of the usual `size` parameter +in the query. Defaults to 10. |`normalize` | If set to `true`, this metric will calculate the https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG[Normalized DCG]. |=======================================================================
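The three metrics above follow the standard information-retrieval definitions, each applied to the top `k` hits: precision counts the fraction of rated-relevant documents, mean reciprocal rank scores the position of the first relevant hit, and DCG discounts graded relevance logarithmically by rank. A small sketch of those textbook formulas (an illustration under the same `k` cutoff and rating threshold described above, not the server's implementation):

["source","java"]
--------------------------------------------------
import java.util.Arrays;
import java.util.List;

public class RankEvalMetricsSketch {

    // Precision@k: fraction of the top k hits rated at or above the threshold.
    static double precisionAtK(List<Integer> ratings, int k, int threshold) {
        int considered = Math.min(k, ratings.size());
        if (considered == 0) {
            return 0.0;
        }
        int relevant = 0;
        for (int i = 0; i < considered; i++) {
            if (ratings.get(i) >= threshold) {
                relevant++;
            }
        }
        return (double) relevant / considered;
    }

    // Reciprocal rank: 1 / rank of the first relevant hit within the top k.
    static double reciprocalRank(List<Integer> ratings, int k, int threshold) {
        for (int i = 0; i < Math.min(k, ratings.size()); i++) {
            if (ratings.get(i) >= threshold) {
                return 1.0 / (i + 1);
            }
        }
        return 0.0;
    }

    // DCG@k with the commonly used gain (2^rel - 1) / log2(rank + 1).
    static double dcgAtK(List<Integer> ratings, int k) {
        double dcg = 0.0;
        for (int i = 0; i < Math.min(k, ratings.size()); i++) {
            dcg += (Math.pow(2, ratings.get(i)) - 1) / (Math.log(i + 2) / Math.log(2));
        }
        return dcg;
    }

    public static void main(String[] args) {
        // Ratings of the top hits for one query, in rank order.
        List<Integer> ratings = Arrays.asList(3, 0, 1, 0, 2);
        System.out.println(precisionAtK(ratings, 5, 1));   // 0.6
        System.out.println(reciprocalRank(ratings, 5, 1)); // 1.0 (rank 1 is relevant)
        System.out.println(dcgAtK(ratings, 5));            // ~8.66
    }
}
--------------------------------------------------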