From 99c8a3380200b5ca38dd17a0a7e40e5b913c5989 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 10 May 2018 13:04:12 -0400 Subject: [PATCH 01/23] LLClient: Support host selection Allows users of the Low Level REST client to specify which hosts a request should be run on. They implement the `NodeSelector` interface or reuse a built in selector like `NOT_MASTER_ONLY` to choose which nodes are valid. Using it looks like: ``` Request request = new Request("POST", "/foo/_search"); request.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); ... ``` This introduces a new `Node` object which contains a `HttpHost` and the metadata about the host. At this point that metadata is just `version` and `roles` but I plan to add node attributes in a followup. The canonical way to **get** this metadata is to use the `Sniffer` to pull the information from the Elasticsearch cluster. I've marked this as "breaking-java" because it breaks custom implementations of `HostsSniffer` by renaming the interface to `NodesSniffer` and by changing it from returning a `List<HttpHost>` to a `List<Node>`. It *shouldn't* break anyone else though. Because we expect to find it useful, this also adds `host_selector` support to `do` statements in the yaml tests. Using it looks a little like: ``` --- "example test": - skip: features: host_selector - do: host_selector: version: " - 7.0.0" # same syntax as skip apiname: something: true ``` The `do` section parses the `version` string into a host selector that uses the same version comparison logic as the `skip` section. When the `do` section is executed it passes the selector off to the `RestClient`, using the `ElasticsearchHostsSniffer` to sniff the required metadata. The idea is to use this in mixed version tests to target a specific version of Elasticsearch so we can be sure about the deprecation logging though we don't currently have any examples that need it. We do, however, have at least one open pull request that requires something like this to properly test it. 
Closes #21888 (kind of, it isn't in the high level client, but we'll do that in a followup) --- .../elasticsearch/client/DeadHostState.java | 43 +-- .../java/org/elasticsearch/client/Node.java | 235 ++++++++++++++ .../elasticsearch/client/NodeSelector.java | 116 +++++++ .../org/elasticsearch/client/Request.java | 15 + .../elasticsearch/client/RequestLogger.java | 6 +- .../org/elasticsearch/client/Response.java | 2 +- .../org/elasticsearch/client/RestClient.java | 289 ++++++++++++------ .../client/RestClientBuilder.java | 22 +- .../client/DeadHostStateTests.java | 51 +--- .../client/HostsTrackingFailureListener.java | 16 +- .../client/NodeSelectorTests.java | 59 ++++ .../org/elasticsearch/client/NodeTests.java | 89 ++++++ .../client/RestClientBuilderTests.java | 31 +- .../RestClientMultipleHostsIntegTests.java | 64 +++- .../client/RestClientMultipleHostsTests.java | 94 ++++-- .../client/RestClientSingleHostTests.java | 14 +- .../elasticsearch/client/RestClientTests.java | 225 +++++++++++++- .../RestClientDocumentation.java | 8 +- ...er.java => ElasticsearchNodesSniffer.java} | 144 +++++++-- .../{HostsSniffer.java => NodesSniffer.java} | 8 +- .../client/sniff/SniffOnFailureListener.java | 5 +- .../elasticsearch/client/sniff/Sniffer.java | 37 +-- .../client/sniff/SnifferBuilder.java | 18 +- .../ElasticsearchNodesSnifferParseTests.java | 109 +++++++ ...va => ElasticsearchNodesSnifferTests.java} | 111 ++++--- ...ostsSniffer.java => MockNodesSniffer.java} | 9 +- .../sniff/SniffOnFailureListenerTests.java | 5 +- .../client/sniff/SnifferBuilderTests.java | 6 +- .../documentation/SnifferDocumentation.java | 29 +- .../src/test/resources/2.0.0_nodes_http.json | 141 +++++++++ .../src/test/resources/5.0.0_nodes_http.json | 169 ++++++++++ .../src/test/resources/6.0.0_nodes_http.json | 169 ++++++++++ client/sniffer/src/test/resources/readme.txt | 4 + docs/java-rest/low-level/sniffer.asciidoc | 10 +- .../rest-api-spec/test/README.asciidoc | 18 ++ test/framework/build.gradle | 1 + 
.../rest/yaml/ClientYamlDocsTestClient.java | 22 +- .../test/rest/yaml/ClientYamlTestClient.java | 32 +- .../yaml/ClientYamlTestExecutionContext.java | 28 +- .../rest/yaml/ESClientYamlSuiteTestCase.java | 82 ++++- .../test/rest/yaml/Features.java | 1 + .../test/rest/yaml/parser/package-info.java | 24 -- .../rest/yaml/section/ApiCallSection.java | 17 ++ .../yaml/section/ClientYamlTestSection.java | 7 + .../test/rest/yaml/section/DoSection.java | 50 ++- .../test/rest/yaml/section/SkipSection.java | 2 +- .../ClientYamlTestExecutionContextTests.java | 26 +- .../yaml/ESClientYamlSuiteTestCaseTests.java | 63 ++++ .../section/ClientYamlTestSectionTests.java | 31 +- .../rest/yaml/section/DoSectionTests.java | 38 +++ .../exporter/http/HttpExporter.java | 12 +- .../exporter/http/NodeFailureListener.java | 8 +- .../http/NodeFailureListenerTests.java | 13 +- 53 files changed, 2399 insertions(+), 429 deletions(-) create mode 100644 client/rest/src/main/java/org/elasticsearch/client/Node.java create mode 100644 client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java create mode 100644 client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java create mode 100644 client/rest/src/test/java/org/elasticsearch/client/NodeTests.java rename client/sniffer/src/main/java/org/elasticsearch/client/sniff/{ElasticsearchHostsSniffer.java => ElasticsearchNodesSniffer.java} (50%) rename client/sniffer/src/main/java/org/elasticsearch/client/sniff/{HostsSniffer.java => NodesSniffer.java} (85%) create mode 100644 client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java rename client/sniffer/src/test/java/org/elasticsearch/client/sniff/{ElasticsearchHostsSnifferTests.java => ElasticsearchNodesSnifferTests.java} (76%) rename client/sniffer/src/test/java/org/elasticsearch/client/sniff/{MockHostsSniffer.java => MockNodesSniffer.java} (78%) create mode 100644 client/sniffer/src/test/resources/2.0.0_nodes_http.json create mode 100644 
client/sniffer/src/test/resources/5.0.0_nodes_http.json create mode 100644 client/sniffer/src/test/resources/6.0.0_nodes_http.json create mode 100644 client/sniffer/src/test/resources/readme.txt delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java diff --git a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java index 452e71b14d93a..1d4e638e068f4 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -29,47 +29,35 @@ final class DeadHostState implements Comparable { private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1); - private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30); + static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30); private final int failedAttempts; private final long deadUntilNanos; - private final TimeSupplier timeSupplier; /** * Build the initial dead state of a host. Useful when a working host stops functioning * and needs to be marked dead after its first failure. In such case the host will be retried after a minute or so. * - * @param timeSupplier a way to supply the current time and allow for unit testing + * @param now the current time in nanoseconds. Prefer a source designed to measure elapsed time like {@link System#nanoTime()}. */ - DeadHostState(TimeSupplier timeSupplier) { + DeadHostState(long now) { this.failedAttempts = 1; - this.deadUntilNanos = timeSupplier.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS; - this.timeSupplier = timeSupplier; + this.deadUntilNanos = now + MIN_CONNECTION_TIMEOUT_NANOS; } /** * Build the dead state of a host given its previous dead state. Useful when a host has been failing before, hence * it already failed for one or more consecutive times. 
The more failed attempts we register the longer we wait * to retry that same host again. Minimum is 1 minute (for a node the only failed once created - * through {@link #DeadHostState(TimeSupplier)}), maximum is 30 minutes (for a node that failed more than 10 consecutive times) + * through {@link #DeadHostState(long)}), maximum is 30 minutes (for a node that failed more than 10 consecutive times) * - * @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt + * @param now the current time in nanoseconds. Prefer a source designed to measure elapsed time like {@link System#nanoTime()}. */ - DeadHostState(DeadHostState previousDeadHostState, TimeSupplier timeSupplier) { + DeadHostState(DeadHostState previousDeadHostState, long now) { long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1), MAX_CONNECTION_TIMEOUT_NANOS); - this.deadUntilNanos = timeSupplier.nanoTime() + timeoutNanos; + this.deadUntilNanos = now + timeoutNanos; this.failedAttempts = previousDeadHostState.failedAttempts + 1; - this.timeSupplier = timeSupplier; - } - - /** - * Indicates whether it's time to retry to failed host or not. 
- * - * @return true if the host should be retried, false otherwise - */ - boolean shallBeRetried() { - return timeSupplier.nanoTime() - deadUntilNanos > 0; } /** @@ -96,19 +84,4 @@ public String toString() { ", deadUntilNanos=" + deadUntilNanos + '}'; } - - /** - * Time supplier that makes timing aspects pluggable to ease testing - */ - interface TimeSupplier { - - TimeSupplier DEFAULT = new TimeSupplier() { - @Override - public long nanoTime() { - return System.nanoTime(); - } - }; - - long nanoTime(); - } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java new file mode 100644 index 0000000000000..b26a0fa603c99 --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/Node.java @@ -0,0 +1,235 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import static java.util.Collections.unmodifiableSet; + +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; + +import org.apache.http.HttpHost; + +/** + * Metadata about an {@link HttpHost} running Elasticsearch. + */ +public class Node { + /** + * Address that this host claims is its primary contact point. 
+ */ + private final HttpHost host; + /** + * Addresses on which the host is listening. These are useful to have + * around because they allow you to find a host based on any address it + * is listening on. + */ + private final Set boundHosts; + /** + * Name of the node as configured by the {@code node.name} attribute. + */ + private final String name; + /** + * Version of Elasticsearch that the node is running or {@code null} + * if we don't know the version. + */ + private final String version; + /** + * Roles that the Elasticsearch process on the host has or {@code null} + * if we don't know what roles the node has. + */ + private final Roles roles; + + /** + * Create a {@linkplain Node} with metadata. All parameters except + * {@code host} are nullable and implementations of {@link NodeSelector} + * need to decide what to do in their absence. + */ + public Node(HttpHost host, Set boundHosts, String name, String version, Roles roles) { + if (host == null) { + throw new IllegalArgumentException("host cannot be null"); + } + this.host = host; + this.boundHosts = boundHosts; + this.name = name; + this.version = version; + this.roles = roles; + } + + /** + * Create a {@linkplain Node} without any metadata. + */ + public Node(HttpHost host) { + this(host, null, null, null, null); + } + + /** + * Make a copy of this {@link Node} but replacing its + * {@link #getHost() host}. Use this when the sniffing implementation + * returns a {@link #getHost() host} that is not useful to the client. + */ + public Node withHost(HttpHost host) { + /* + * If the new host isn't in the bound hosts list we add it so the + * result looks sane. + */ + Set boundHosts = this.boundHosts; + if (false == boundHosts.contains(host)) { + boundHosts = new HashSet<>(boundHosts); + boundHosts.add(host); + boundHosts = unmodifiableSet(boundHosts); + } + return new Node(host, boundHosts, name, version, roles); + } + + /** + * Contact information for the host. 
+ */ + public HttpHost getHost() { + return host; + } + + /** + * Addresses on which the host is listening. These are useful to have + * around because they allow you to find a host based on any address it + * is listening on. + */ + public Set getBoundHosts() { + return boundHosts; + } + + /** + * @return the name + */ + public String getName() { + return name; + } + + /** + * Version of Elasticsearch that the node is running or {@code null} + * if we don't know the version. + */ + public String getVersion() { + return version; + } + + /** + * Roles that the Elasticsearch process on the host has or {@code null} + * if we don't know what roles the node has. + */ + public Roles getRoles() { + return roles; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + b.append("[host=").append(host); + if (boundHosts != null) { + b.append(", bound=").append(boundHosts); + } + if (name != null) { + b.append(", name=").append(name); + } + if (version != null) { + b.append(", version=").append(version); + } + if (roles != null) { + b.append(", roles=").append(roles); + } + return b.append(']').toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Node other = (Node) obj; + return host.equals(other.host) + && Objects.equals(boundHosts, other.boundHosts) + && Objects.equals(version, other.version) + && Objects.equals(name, other.name) + && Objects.equals(roles, other.roles); + } + + @Override + public int hashCode() { + return Objects.hash(host, boundHosts, name, version, roles); + } + + /** + * Role information about an Elasticsearch process. 
+ */ + public static final class Roles { + private final boolean masterEligible; + private final boolean data; + private final boolean ingest; + + public Roles(boolean masterEligible, boolean data, boolean ingest) { + this.masterEligible = masterEligible; + this.data = data; + this.ingest = ingest; + } + + /** + * The node could be elected master. + */ + public boolean isMasterEligible() { + return masterEligible; + } + /** + * The node stores data. + */ + public boolean isData() { + return data; + } + /** + * The node runs ingest pipelines. + */ + public boolean isIngest() { + return ingest; + } + + @Override + public String toString() { + StringBuilder result = new StringBuilder(3); + if (masterEligible) result.append('m'); + if (data) result.append('d'); + if (ingest) result.append('i'); + return result.toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Roles other = (Roles) obj; + return masterEligible == other.masterEligible + && data == other.data + && ingest == other.ingest; + } + + @Override + public int hashCode() { + return Objects.hash(masterEligible, data, ingest); + } + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java new file mode 100644 index 0000000000000..b9fb35d24168c --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +/** + * Selects nodes that can receive requests. Used to keep requests away + * from master nodes or to send them to nodes with a particular attribute. + * Use with {@link Request#setNodeSelector(NodeSelector)}. + */ +public interface NodeSelector { + /** + * Select the {@link Node}s to which to send requests. This may be called + * twice per request, once for "living" nodes that have not had been + * blacklisted by previous errors if there are any. If it returns an + * empty list when sent the living nodes or if there aren't any living + * nodes left then this will be called with a list of "dead" nodes that + * have been blacklisted by previous failures. In both cases it should + * return a list of nodes sorted by its preference for which node is used. + * If it is operating on "living" nodes that it returns function as + * fallbacks in case of request failures. If it is operating on dead nodes + * then the dead node that it returns is attempted but no others. + * + * @param nodes an unmodifiable list of {@linkplain Node}s in the order + * that the {@link RestClient} would prefer to use them + * @return a subset of the provided list of {@linkplain Node}s that the + * selector approves of, in the order that the selector would prefer + * to use them. + */ + List select(List nodes); + + /** + * Selector that matches any node. 
+ */ + NodeSelector ANY = new NodeSelector() { + @Override + public List select(List nodes) { + return nodes; + } + + @Override + public String toString() { + return "ANY"; + } + }; + + /** + * Selector that matches any node that has metadata and doesn't + * have the {@code master} role OR it has the data {@code data} + * role. It does not reorder the nodes sent to it. + */ + NodeSelector NOT_MASTER_ONLY = new NodeSelector() { + @Override + public List select(List nodes) { + List subset = new ArrayList<>(nodes.size()); + for (Node node : nodes) { + if (node.getRoles() == null) continue; + if (false == node.getRoles().isMasterEligible() || node.getRoles().isData()) { + subset.add(node); + } + } + return subset; + } + + @Override + public String toString() { + return "NOT_MASTER_ONLY"; + } + }; + + /** + * Selector that composes two selectors, running the "right" most selector + * first and then running the "left" selector on the results of the "right" + * selector. + */ + class Compose implements NodeSelector { + private final NodeSelector lhs; + private final NodeSelector rhs; + + public Compose(NodeSelector lhs, NodeSelector rhs) { + this.lhs = Objects.requireNonNull(lhs, "lhs is required"); + this.rhs = Objects.requireNonNull(rhs, "rhs is required"); + } + + @Override + public List select(List nodes) { + return lhs.select(rhs.select(nodes)); + } + + @Override + public String toString() { + // . as in haskell's "compose" operator + return lhs + "." 
+ rhs; + } + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/Request.java b/client/rest/src/main/java/org/elasticsearch/client/Request.java index 92610239cae92..b82c045023537 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Request.java @@ -43,6 +43,7 @@ public final class Request { private HttpEntity entity; private Header[] headers = NO_HEADERS; + private NodeSelector nodeSelector = NodeSelector.ANY; private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory = HttpAsyncResponseConsumerFactory.DEFAULT; @@ -142,6 +143,20 @@ public Header[] getHeaders() { return headers; } + /** + * Configure which nodes are valid destinations for this request. + */ + public void setNodeSelector(NodeSelector nodeSelector) { + this.nodeSelector = nodeSelector; + } + + /** + * The selector that chooses which nodes are valid destinations for this request. + */ + public NodeSelector getNodeSelector() { + return nodeSelector; + } + /** * set the {@link HttpAsyncResponseConsumerFactory} used to create one * {@link HttpAsyncResponseConsumer} callback per retry. 
Controls how the diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java index 07ff89b7e3fb0..7c56a7edf97a9 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -87,14 +87,14 @@ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpR /** * Logs a request that failed */ - static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, Exception e) { + static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) { if (logger.isDebugEnabled()) { - logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e); + logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e); } if (tracer.isTraceEnabled()) { String traceRequest; try { - traceRequest = buildTraceRequest(request, host); + traceRequest = buildTraceRequest(request, node.getHost()); } catch (IOException e1) { tracer.trace("error while reading request for trace purposes", e); traceRequest = ""; diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java index 02aedb4765abe..39bbf769713b2 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Response.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java @@ -40,7 +40,7 @@ public class Response { Response(RequestLine requestLine, HttpHost host, HttpResponse response) { Objects.requireNonNull(requestLine, "requestLine cannot be null"); - Objects.requireNonNull(host, "node cannot be null"); + Objects.requireNonNull(host, "host cannot be null"); Objects.requireNonNull(response, "response cannot be null"); this.requestLine = requestLine; this.host = host; diff --git 
a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 05fa4d536b3b6..80454ab245d0c 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -48,6 +48,9 @@ import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; import javax.net.ssl.SSLHandshakeException; + +import static java.util.Collections.singletonList; + import java.io.Closeable; import java.io.IOException; import java.net.SocketTimeoutException; @@ -101,48 +104,93 @@ public class RestClient implements Closeable { final List
defaultHeaders; private final long maxRetryTimeoutMillis; private final String pathPrefix; - private final AtomicInteger lastHostIndex = new AtomicInteger(0); - private volatile HostTuple> hostTuple; + private final AtomicInteger lastNodeIndex = new AtomicInteger(0); private final ConcurrentMap blacklist = new ConcurrentHashMap<>(); private final FailureListener failureListener; + private volatile NodeTuple> nodeTuple; RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders, - HttpHost[] hosts, String pathPrefix, FailureListener failureListener) { + Node[] nodes, String pathPrefix, FailureListener failureListener) { this.client = client; this.maxRetryTimeoutMillis = maxRetryTimeoutMillis; this.defaultHeaders = Collections.unmodifiableList(Arrays.asList(defaultHeaders)); this.failureListener = failureListener; this.pathPrefix = pathPrefix; - setHosts(hosts); + setNodes(nodes); } /** * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation. * Creates a new builder instance and sets the hosts that the client will send requests to. + *

+ * Prefer this to {@link #builder(Node...)} if you have metadata up front about the nodes. + * If you don't either one is fine. + */ + public static RestClientBuilder builder(Node... nodes) { + return new RestClientBuilder(nodes); + } + + /** + * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation. + * Creates a new builder instance and sets the nodes that the client will send requests to. + *

+ * You can use this if you do not have metadata up front about the nodes. If you do, prefer + * {@link #builder(Node...)}. + * @see Node#Node(HttpHost) */ public static RestClientBuilder builder(HttpHost... hosts) { - return new RestClientBuilder(hosts); + return new RestClientBuilder(hostsToNodes(hosts)); } /** - * Replaces the hosts that the client communicates with. - * @see HttpHost + * Replaces the nodes that the client communicates without providing any + * metadata about any of the nodes. */ - public synchronized void setHosts(HttpHost... hosts) { - if (hosts == null || hosts.length == 0) { - throw new IllegalArgumentException("hosts must not be null nor empty"); + public void setHosts(HttpHost... hosts) { + setNodes(hostsToNodes(hosts)); + } + + /** + * Replaces the nodes that the client communicates with. Prefer this to + * {@link #setHosts(HttpHost...)} if you have metadata about the hosts + * like their Elasticsearch version of which roles they implement. + */ + public synchronized void setNodes(Node... nodes) { + if (nodes == null || nodes.length == 0) { + throw new IllegalArgumentException("nodes must not be null or empty"); } - Set httpHosts = new HashSet<>(); AuthCache authCache = new BasicAuthCache(); - for (HttpHost host : hosts) { - Objects.requireNonNull(host, "host cannot be null"); - httpHosts.add(host); - authCache.put(host, new BasicScheme()); + + for (Node node : nodes) { + if (node == null) { + throw new IllegalArgumentException("node cannot be null"); + } + authCache.put(node.getHost(), new BasicScheme()); } - this.hostTuple = new HostTuple<>(Collections.unmodifiableSet(httpHosts), authCache); + this.nodeTuple = new NodeTuple<>(Collections.unmodifiableList( + Arrays.asList(nodes)), authCache); this.blacklist.clear(); } + /** + * Get the list of nodes that the client knows about. The list is + * unmodifiable. 
+ */ + public List getNodes() { + return nodeTuple.nodes; + } + + private static Node[] hostsToNodes(HttpHost[] hosts) { + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("hosts must not be null or empty"); + } + Node[] nodes = new Node[hosts.length]; + for (int i = 0; i < hosts.length; i++) { + nodes[i] = new Node(hosts[i]); + } + return nodes; + } + /** * Sends a request to the Elasticsearch cluster that the client points to. * Blocks until the request is completed and returns its response or fails @@ -428,7 +476,7 @@ public void performRequestAsync(String method, String endpoint, Map requestParams = new HashMap<>(request.getParameters()); //ignore is a special parameter supported by the clients, shouldn't be sent to es String ignoreString = requestParams.remove("ignore"); @@ -460,40 +508,40 @@ void performRequestAsyncNoCatch(Request request, ResponseListener listener) { setHeaders(httpRequest, request.getHeaders()); FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(listener); long startTime = System.nanoTime(); - performRequestAsync(startTime, nextHost(), httpRequest, ignoreErrorCodes, + performRequestAsync(startTime, nextNode(request.getNodeSelector()), httpRequest, ignoreErrorCodes, request.getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener); } - private void performRequestAsync(final long startTime, final HostTuple> hostTuple, final HttpRequestBase request, + private void performRequestAsync(final long startTime, final NodeTuple> nodeTuple, final HttpRequestBase request, final Set ignoreErrorCodes, final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, final FailureTrackingResponseListener listener) { - final HttpHost host = hostTuple.hosts.next(); + final Node node = nodeTuple.nodes.next(); //we stream the request body if the entity allows for it - final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request); + 
final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(node.getHost(), request); final HttpAsyncResponseConsumer asyncResponseConsumer = httpAsyncResponseConsumerFactory.createHttpAsyncResponseConsumer(); final HttpClientContext context = HttpClientContext.create(); - context.setAuthCache(hostTuple.authCache); + context.setAuthCache(nodeTuple.authCache); client.execute(requestProducer, asyncResponseConsumer, context, new FutureCallback() { @Override public void completed(HttpResponse httpResponse) { try { - RequestLogger.logResponse(logger, request, host, httpResponse); + RequestLogger.logResponse(logger, request, node.getHost(), httpResponse); int statusCode = httpResponse.getStatusLine().getStatusCode(); - Response response = new Response(request.getRequestLine(), host, httpResponse); + Response response = new Response(request.getRequestLine(), node.getHost(), httpResponse); if (isSuccessfulResponse(statusCode) || ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { - onResponse(host); + onResponse(node); listener.onSuccess(response); } else { ResponseException responseException = new ResponseException(response); if (isRetryStatus(statusCode)) { //mark host dead and retry against next one - onFailure(host); + onFailure(node); retryIfPossible(responseException); } else { //mark host alive and don't retry, as the error should be a request problem - onResponse(host); + onResponse(node); listener.onDefinitiveFailure(responseException); } } @@ -505,8 +553,8 @@ public void completed(HttpResponse httpResponse) { @Override public void failed(Exception failure) { try { - RequestLogger.logFailedRequest(logger, request, host, failure); - onFailure(host); + RequestLogger.logFailedRequest(logger, request, node, failure); + onFailure(node); retryIfPossible(failure); } catch(Exception e) { listener.onDefinitiveFailure(e); @@ -514,7 +562,7 @@ public void failed(Exception failure) { } private void retryIfPossible(Exception exception) { - if 
(hostTuple.hosts.hasNext()) { + if (nodeTuple.nodes.hasNext()) { //in case we are retrying, check whether maxRetryTimeout has been reached long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime); long timeout = maxRetryTimeoutMillis - timeElapsedMillis; @@ -525,7 +573,7 @@ private void retryIfPossible(Exception exception) { } else { listener.trackFailure(exception); request.reset(); - performRequestAsync(startTime, hostTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener); + performRequestAsync(startTime, nodeTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener); } } else { listener.onDefinitiveFailure(exception); @@ -554,54 +602,103 @@ private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) { } /** - * Returns an {@link Iterable} of hosts to be used for a request call. - * Ideally, the first host is retrieved from the iterable and used successfully for the request. - * Otherwise, after each failure the next host has to be retrieved from the iterator so that the request can be retried until - * there are no more hosts available to retry against. The maximum total of attempts is equal to the number of hosts in the iterable. - * The iterator returned will never be empty. In case there are no healthy hosts available, or dead ones to be be retried, - * one dead host gets returned so that it can be retried. + * Returns a non-empty {@link Iterator} of nodes to be used for a request + * that match the {@link NodeSelector}. + *

+ * If there are no living nodes that match the {@link NodeSelector} + * this will return the dead node that matches the {@link NodeSelector} + * that is closest to being revived. + *

+ * If no living and no dead nodes match the selector we retry a few + * times to handle concurrent modifications of the list of dead nodes. + * We never block the thread or {@link Thread#sleep} or anything like + * that. If the retries fail this throws a {@link IOException}. + * @throws IOException if no nodes are available */ - private HostTuple> nextHost() { - final HostTuple> hostTuple = this.hostTuple; - Collection nextHosts = Collections.emptySet(); - do { - Set filteredHosts = new HashSet<>(hostTuple.hosts); - for (Map.Entry entry : blacklist.entrySet()) { - if (entry.getValue().shallBeRetried() == false) { - filteredHosts.remove(entry.getKey()); - } + private NodeTuple> nextNode(NodeSelector nodeSelector) throws IOException { + NodeTuple> nodeTuple = this.nodeTuple; + List hosts = selectHosts(nodeTuple, blacklist, lastNodeIndex, System.nanoTime(), nodeSelector); + return new NodeTuple<>(hosts.iterator(), nodeTuple.authCache); + } + + static List selectHosts(NodeTuple> nodeTuple, + Map blacklist, AtomicInteger lastNodeIndex, + long now, NodeSelector nodeSelector) throws IOException { + /* + * Sort the nodes into living and dead lists. 
+ */ + List livingNodes = new ArrayList<>(nodeTuple.nodes.size() - blacklist.size()); + List deadNodes = new ArrayList<>(blacklist.size()); + for (Node node : nodeTuple.nodes) { + DeadHostState deadness = blacklist.get(node.getHost()); + if (deadness == null) { + livingNodes.add(node); + continue; } - if (filteredHosts.isEmpty()) { - //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried - List> sortedHosts = new ArrayList<>(blacklist.entrySet()); - if (sortedHosts.size() > 0) { - Collections.sort(sortedHosts, new Comparator>() { - @Override - public int compare(Map.Entry o1, Map.Entry o2) { - return o1.getValue().compareTo(o2.getValue()); - } - }); - HttpHost deadHost = sortedHosts.get(0).getKey(); - logger.trace("resurrecting host [" + deadHost + "]"); - nextHosts = Collections.singleton(deadHost); + long nanosUntilRevival = now - deadness.getDeadUntilNanos(); + if (nanosUntilRevival > 0) { + livingNodes.add(node); + continue; + } + deadNodes.add(new DeadNodeAndRevival(node, nanosUntilRevival)); + } + + if (false == livingNodes.isEmpty()) { + /* + * Normal state: there is at least one living node. Rotate the + * list so subsequent requests to will prefer the nodes in a + * different order then run them through the NodeSelector so it + * can have its say in which nodes are ok and their ordering. If + * the selector is ok with any over the living nodes then use + * them for the request. + */ + Collections.rotate(livingNodes, lastNodeIndex.getAndIncrement()); + List selectedLivingNodes = nodeSelector.select(livingNodes); + if (false == selectedLivingNodes.isEmpty()) { + return selectedLivingNodes; + } + } + + /* + * Last resort: If there are no good nodes to use, either because + * the selector rejected all the living nodes or because there aren't + * any living ones. Either way, we want to revive a single dead node + * that the NodeSelectors are OK with. 
We do this by sorting the dead + * nodes by their revival time and passing them through the + * NodeSelector so it can have its say in which nodes are ok and their + * ordering. If the selector is ok with any of the nodes then use just + * the first one in the list because we only want to revive a single + * node. + */ + if (false == deadNodes.isEmpty()) { + Collections.sort(deadNodes, new Comparator() { + @Override + public int compare(DeadNodeAndRevival lhs, DeadNodeAndRevival rhs) { + return Long.compare(rhs.nanosUntilRevival, lhs.nanosUntilRevival); } - } else { - List rotatedHosts = new ArrayList<>(filteredHosts); - Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement()); - nextHosts = rotatedHosts; + }); + + List selectedDeadNodes = new ArrayList<>(deadNodes.size()); + for (DeadNodeAndRevival n : deadNodes) { + selectedDeadNodes.add(n.node); + } + selectedDeadNodes = nodeSelector.select(selectedDeadNodes); + if (false == selectedDeadNodes.isEmpty()) { + return singletonList(selectedDeadNodes.get(0)); } - } while(nextHosts.isEmpty()); - return new HostTuple<>(nextHosts.iterator(), hostTuple.authCache); + } + throw new IOException("NodeSelector [" + nodeSelector + "] rejected all nodes, " + + "living " + livingNodes + " and dead " + deadNodes); } /** * Called after each successful request call. * Receives as an argument the host that was used for the successful request. */ - private void onResponse(HttpHost host) { - DeadHostState removedHost = this.blacklist.remove(host); + private void onResponse(Node node) { + DeadHostState removedHost = this.blacklist.remove(node.getHost()); if (logger.isDebugEnabled() && removedHost != null) { - logger.debug("removed host [" + host + "] from blacklist"); + logger.debug("removed [" + node + "] from blacklist"); } } @@ -609,20 +706,25 @@ private void onResponse(HttpHost host) { * Called after each failed attempt. * Receives as an argument the host that was used for the failed attempt. 
*/ - private void onFailure(HttpHost host) { + private void onFailure(Node node) { while(true) { - DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, new DeadHostState(DeadHostState.TimeSupplier.DEFAULT)); + DeadHostState previousDeadHostState = + blacklist.putIfAbsent(node.getHost(), new DeadHostState(System.nanoTime())); if (previousDeadHostState == null) { - logger.debug("added host [" + host + "] to blacklist"); + if (logger.isDebugEnabled()) { + logger.debug("added [" + node + "] to blacklist"); + } break; } - if (blacklist.replace(host, previousDeadHostState, - new DeadHostState(previousDeadHostState, DeadHostState.TimeSupplier.DEFAULT))) { - logger.debug("updated host [" + host + "] already in blacklist"); + if (blacklist.replace(node.getHost(), previousDeadHostState, + new DeadHostState(previousDeadHostState, System.nanoTime()))) { + if (logger.isDebugEnabled()) { + logger.debug("updated [" + node + "] already in blacklist"); + } break; } } - failureListener.onFailure(host); + failureListener.onFailure(node); } @Override @@ -856,27 +958,44 @@ Response get() throws IOException { */ public static class FailureListener { /** - * Notifies that the host provided as argument has just failed + * Notifies that the node provided as argument has just failed */ - public void onFailure(HttpHost host) { - - } + public void onFailure(Node node) {} } /** - * {@code HostTuple} enables the {@linkplain HttpHost}s and {@linkplain AuthCache} to be set together in a thread - * safe, volatile way. + * {@link NodeTupe} enables the {@linkplain Node}s and {@linkplain AuthCache} + * to be set together in a thread safe, volatile way. 
*/ - private static class HostTuple { - final T hosts; + static class NodeTuple { + final T nodes; final AuthCache authCache; - HostTuple(final T hosts, final AuthCache authCache) { - this.hosts = hosts; + NodeTuple(final T nodes, final AuthCache authCache) { + this.nodes = nodes; this.authCache = authCache; } } + /** + * Contains a reference to a blacklisted node and the time until it is + * revived. We use this so we can do a single pass over the blacklist. + */ + private static class DeadNodeAndRevival { + final Node node; + final long nanosUntilRevival; + + DeadNodeAndRevival(Node node, long nanosUntilRevival) { + this.node = node; + this.nanosUntilRevival = nanosUntilRevival; + } + + @Override + public String toString() { + return node.toString(); + } + } + /** * Add all parameters from a map to a {@link Request}. This only exists * to support methods that exist for backwards compatibility. diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java index 8768c07161989..7cb91b4f34d93 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java @@ -48,7 +48,7 @@ public final class RestClientBuilder { private static final Header[] EMPTY_HEADERS = new Header[0]; - private final HttpHost[] hosts; + private final Node[] nodes; private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS; private Header[] defaultHeaders = EMPTY_HEADERS; private RestClient.FailureListener failureListener; @@ -59,18 +59,18 @@ public final class RestClientBuilder { /** * Creates a new builder instance and sets the hosts that the client will send requests to. * - * @throws NullPointerException if {@code hosts} or any host is {@code null}. - * @throws IllegalArgumentException if {@code hosts} is empty. + * @throws IllegalArgumentException if {@code nodes} is {@code null} or empty. 
*/ - RestClientBuilder(HttpHost... hosts) { - Objects.requireNonNull(hosts, "hosts must not be null"); - if (hosts.length == 0) { - throw new IllegalArgumentException("no hosts provided"); + RestClientBuilder(Node[] nodes) { + if (nodes == null || nodes.length == 0) { + throw new IllegalArgumentException("nodes must not be null or empty"); } - for (HttpHost host : hosts) { - Objects.requireNonNull(host, "host cannot be null"); + for (Node node : nodes) { + if (node == null) { + throw new IllegalArgumentException("node cannot be null"); + } } - this.hosts = hosts; + this.nodes = nodes; } /** @@ -186,7 +186,7 @@ public CloseableHttpAsyncClient run() { return createHttpClient(); } }); - RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, pathPrefix, failureListener); + RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, nodes, pathPrefix, failureListener); httpClient.start(); return restClient; } diff --git a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java index 75fbafd88f83c..ac8b9e748de19 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java @@ -32,17 +32,17 @@ public class DeadHostStateTests extends RestClientTestCase { private static long[] EXPECTED_TIMEOUTS_SECONDS = new long[]{60, 84, 120, 169, 240, 339, 480, 678, 960, 1357, 1800}; public void testInitialDeadHostStateDefaultTimeSupplier() { - DeadHostState deadHostState = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); + DeadHostState deadHostState = new DeadHostState(System.nanoTime()); long currentTime = System.nanoTime(); assertThat(deadHostState.getDeadUntilNanos(), greaterThan(currentTime)); assertThat(deadHostState.getFailedAttempts(), equalTo(1)); } public void testDeadHostStateFromPreviousDefaultTimeSupplier() { 
- DeadHostState previous = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); + DeadHostState previous = new DeadHostState(System.nanoTime()); int iters = randomIntBetween(5, 30); for (int i = 0; i < iters; i++) { - DeadHostState deadHostState = new DeadHostState(previous, DeadHostState.TimeSupplier.DEFAULT); + DeadHostState deadHostState = new DeadHostState(previous, System.nanoTime()); assertThat(deadHostState.getDeadUntilNanos(), greaterThan(previous.getDeadUntilNanos())); assertThat(deadHostState.getFailedAttempts(), equalTo(previous.getFailedAttempts() + 1)); previous = deadHostState; @@ -54,9 +54,9 @@ public void testCompareToDefaultTimeSupplier() { DeadHostState[] deadHostStates = new DeadHostState[numObjects]; for (int i = 0; i < numObjects; i++) { if (i == 0) { - deadHostStates[i] = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); + deadHostStates[i] = new DeadHostState(System.nanoTime()); } else { - deadHostStates[i] = new DeadHostState(deadHostStates[i - 1], DeadHostState.TimeSupplier.DEFAULT); + deadHostStates[i] = new DeadHostState(deadHostStates[i - 1], System.nanoTime()); } } for (int k = 1; k < deadHostStates.length; k++) { @@ -65,54 +65,19 @@ public void testCompareToDefaultTimeSupplier() { } } - public void testShallBeRetried() { - ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier(); - DeadHostState deadHostState = null; - for (int i = 0; i < EXPECTED_TIMEOUTS_SECONDS.length; i++) { - long expectedTimeoutSecond = EXPECTED_TIMEOUTS_SECONDS[i]; - timeSupplier.nanoTime = 0; - if (i == 0) { - deadHostState = new DeadHostState(timeSupplier); - } else { - deadHostState = new DeadHostState(deadHostState, timeSupplier); - } - for (int j = 0; j < expectedTimeoutSecond; j++) { - timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1); - assertThat(deadHostState.shallBeRetried(), is(false)); - } - int iters = randomIntBetween(5, 30); - for (int j = 0; j < iters; j++) { - timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1); - 
assertThat(deadHostState.shallBeRetried(), is(true)); - } - } - } - public void testDeadHostStateTimeouts() { - ConfigurableTimeSupplier zeroTimeSupplier = new ConfigurableTimeSupplier(); - zeroTimeSupplier.nanoTime = 0L; - DeadHostState previous = new DeadHostState(zeroTimeSupplier); + DeadHostState previous = new DeadHostState(0); for (long expectedTimeoutsSecond : EXPECTED_TIMEOUTS_SECONDS) { assertThat(TimeUnit.NANOSECONDS.toSeconds(previous.getDeadUntilNanos()), equalTo(expectedTimeoutsSecond)); - previous = new DeadHostState(previous, zeroTimeSupplier); + previous = new DeadHostState(previous, 0); } //check that from here on the timeout does not increase int iters = randomIntBetween(5, 30); for (int i = 0; i < iters; i++) { - DeadHostState deadHostState = new DeadHostState(previous, zeroTimeSupplier); + DeadHostState deadHostState = new DeadHostState(previous, 0); assertThat(TimeUnit.NANOSECONDS.toSeconds(deadHostState.getDeadUntilNanos()), equalTo(EXPECTED_TIMEOUTS_SECONDS[EXPECTED_TIMEOUTS_SECONDS.length - 1])); previous = deadHostState; } } - - private static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier { - - long nanoTime; - - @Override - public long nanoTime() { - return nanoTime; - } - } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java b/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java index e2f0ba81f6ed7..f882496838dc0 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java +++ b/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java @@ -29,14 +29,22 @@ import static org.junit.Assert.assertThat; /** - * {@link org.elasticsearch.client.RestClient.FailureListener} impl that allows to track when it gets called for which host. + * {@link RestClient.FailureListener} impl that allows to track when it gets called for which host. 
*/ class HostsTrackingFailureListener extends RestClient.FailureListener { private volatile Set hosts = new HashSet<>(); @Override - public void onFailure(HttpHost host) { - hosts.add(host); + public void onFailure(Node node) { + hosts.add(node.getHost()); + } + + void assertCalled(Node... nodes) { + HttpHost[] hosts = new HttpHost[nodes.length]; + for (int i = 0; i < nodes.length; i++) { + hosts[i] = nodes[i].getHost(); + } + assertCalled(hosts); } void assertCalled(HttpHost... hosts) { @@ -48,4 +56,4 @@ void assertCalled(HttpHost... hosts) { void assertNotCalled() { assertEquals(0, hosts.size()); } -} \ No newline at end of file +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java new file mode 100644 index 0000000000000..e8aa7a175be8b --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node.Roles; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class NodeSelectorTests extends RestClientTestCase { + public void testAny() { + List nodes = new ArrayList<>(); + int size = between(2, 5); + for (int i = 0; i < size; i++) { + nodes.add(dummyNode(randomBoolean(), randomBoolean(), randomBoolean())); + } + assertEquals(nodes, NodeSelector.ANY.select(nodes)); + } + + public void testNotMasterOnly() { + Node masterOnly = dummyNode(true, false, randomBoolean()); + Node masterAndData = dummyNode(true, true, randomBoolean()); + Node coordinatingOnly = dummyNode(false, false, randomBoolean()); + Node data = dummyNode(false, true, randomBoolean()); + List nodes = Arrays.asList(masterOnly, masterAndData, coordinatingOnly, data); + Collections.shuffle(nodes, getRandom()); + List expected = new ArrayList<>(nodes); + expected.remove(masterOnly); + assertEquals(expected, NodeSelector.NOT_MASTER_ONLY.select(nodes)); + } + + private Node dummyNode(boolean master, boolean data, boolean ingest) { + return new Node(new HttpHost("dummy"), Collections.emptySet(), + randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), + new Roles(master, data, ingest)); + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java new file mode 100644 index 0000000000000..989861df50293 --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node.Roles; + +import java.util.Arrays; +import java.util.HashSet; + +import static java.util.Collections.singleton; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class NodeTests extends RestClientTestCase { + public void testWithHost() { + HttpHost h1 = new HttpHost("1"); + HttpHost h2 = new HttpHost("2"); + HttpHost h3 = new HttpHost("3"); + + Node n = new Node(h1, new HashSet<>(Arrays.asList(h1, h2)), + randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), + new Roles(randomBoolean(), randomBoolean(), randomBoolean())); + + // Host is in the bound hosts list + assertEquals(h2, n.withHost(h2).getHost()); + assertEquals(n.getBoundHosts(), n.withHost(h2).getBoundHosts()); + + // Host not in the bound hosts list + assertEquals(h3, n.withHost(h3).getHost()); + assertEquals(new HashSet<>(Arrays.asList(h1, h2, h3)), n.withHost(h3).getBoundHosts()); + } + + public void testToString() { + assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString()); + assertEquals("[host=http://1, roles=mdi]", new Node(new HttpHost("1"), + null, null, null, new Roles(true, true, true)).toString()); + assertEquals("[host=http://1, version=ver]", new Node(new HttpHost("1"), + 
null, null, "ver", null).toString()); + assertEquals("[host=http://1, name=nam]", new Node(new HttpHost("1"), + null, "nam", null, null).toString()); + assertEquals("[host=http://1, bound=[http://1, http://2]]", new Node(new HttpHost("1"), + new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null).toString()); + assertEquals("[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m]", + new Node(new HttpHost("1"), new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), + "nam", "ver", new Roles(true, false, false)).toString()); + + } + + public void testEqualsAndHashCode() { + HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5)); + Node node = new Node(host, + randomBoolean() ? null : singleton(host), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : new Roles(true, true, true)); + assertFalse(node.equals(null)); + assertTrue(node.equals(node)); + assertEquals(node.hashCode(), node.hashCode()); + Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles()); + assertTrue(node.equals(copy)); + assertEquals(node.hashCode(), copy.hashCode()); + assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(), + node.getName(), node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))), + node.getName(), node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false)))); + } +} 
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 9657e782bda04..39be4db3b98ee 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -39,21 +39,42 @@ public void testBuild() throws IOException { try { RestClient.builder((HttpHost[])null); fail("should have failed"); - } catch(NullPointerException e) { - assertEquals("hosts must not be null", e.getMessage()); + } catch(IllegalArgumentException e) { + assertEquals("hosts must not be null or empty", e.getMessage()); + } + + try { + RestClient.builder(new HttpHost[] {}); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("hosts must not be null or empty", e.getMessage()); } try { - RestClient.builder(); + RestClient.builder((Node[])null); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals("no hosts provided", e.getMessage()); + assertEquals("nodes must not be null or empty", e.getMessage()); + } + + try { + RestClient.builder(new Node[] {}); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + + try { + RestClient.builder(new Node(new HttpHost("localhost", 9200)), null); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("node cannot be null", e.getMessage()); } try { RestClient.builder(new HttpHost("localhost", 9200), null); fail("should have failed"); - } catch(NullPointerException e) { + } catch(IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index 
aa323276404cf..fcbf52d665197 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -29,19 +29,23 @@ import org.junit.BeforeClass; import java.io.IOException; +import java.net.ConnectException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static java.util.Collections.singletonList; import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. 
@@ -50,31 +54,37 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { private static HttpServer[] httpServers; - private static RestClient restClient; + private static HttpHost[] httpHosts; + private static boolean stoppedFirstHost = false; + private static String pathPrefixWithoutLeadingSlash; private static String pathPrefix; + private static RestClient restClient; @BeforeClass public static void startHttpServer() throws Exception { - String pathPrefixWithoutLeadingSlash; if (randomBoolean()) { - pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiOfLengthBetween(1, 5); + pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiLettersOfLengthBetween(1, 5); pathPrefix = "/" + pathPrefixWithoutLeadingSlash; } else { pathPrefix = pathPrefixWithoutLeadingSlash = ""; } int numHttpServers = randomIntBetween(2, 4); httpServers = new HttpServer[numHttpServers]; - HttpHost[] httpHosts = new HttpHost[numHttpServers]; + httpHosts = new HttpHost[numHttpServers]; for (int i = 0; i < numHttpServers; i++) { HttpServer httpServer = createHttpServer(); httpServers[i] = httpServer; httpHosts[i] = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); } + restClient = buildRestClient(); + } + + private static RestClient buildRestClient() { RestClientBuilder restClientBuilder = RestClient.builder(httpHosts); if (pathPrefix.length() > 0) { restClientBuilder.setPathPrefix((randomBoolean() ? 
"/" : "") + pathPrefixWithoutLeadingSlash); } - restClient = restClientBuilder.build(); + return restClientBuilder.build(); } private static HttpServer createHttpServer() throws Exception { @@ -118,6 +128,9 @@ public void stopRandomHost() { if (httpServers.length > 1 && randomBoolean()) { List updatedHttpServers = new ArrayList<>(httpServers.length - 1); int nodeIndex = randomInt(httpServers.length - 1); + if (0 == nodeIndex) { + stoppedFirstHost = true; + } for (int i = 0; i < httpServers.length; i++) { HttpServer httpServer = httpServers[i]; if (i == nodeIndex) { @@ -182,6 +195,33 @@ public void onFailure(Exception exception) { } } + /** + * Test host selector against a real server and + * test what happens after calling + */ + public void testNodeSelector() throws IOException { + Request request = new Request("GET", "/200"); + request.setNodeSelector(firstPositionNodeSelector()); + int rounds = between(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. 
+ */ + if (stoppedFirstHost) { + try { + restClient.performRequest(request); + fail("expected to fail to connect"); + } catch (ConnectException e) { + assertEquals("Connection refused", e.getMessage()); + } + } else { + Response response = restClient.performRequest(request); + assertEquals(httpHosts[0], response.getHost()); + } + } + } + private static class TestResponse { private final String method; private final int statusCode; @@ -203,4 +243,18 @@ Response getResponse() { throw new AssertionError("unexpected response " + response.getClass()); } } + + private NodeSelector firstPositionNodeSelector() { + return new NodeSelector() { + @Override + public List select(List nodes) { + for (Node node : nodes) { + if (httpHosts[0] == node.getHost()) { + return singletonList(node); + } + } + return Collections.emptyList(); + } + }; + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index a3a834ff3204b..908a3e4747baa 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -35,6 +35,7 @@ import org.apache.http.message.BasicStatusLine; import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.elasticsearch.client.Node.Roles; import org.junit.After; import org.junit.Before; import org.mockito.invocation.InvocationOnMock; @@ -44,17 +45,20 @@ import java.net.SocketTimeoutException; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import static java.util.Collections.singletonList; import static 
org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomErrorRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod; import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.hasItem; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @@ -71,7 +75,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { private ExecutorService exec = Executors.newFixedThreadPool(1); private RestClient restClient; - private HttpHost[] httpHosts; + private Node[] nodes; private HostsTrackingFailureListener failureListener; @Before @@ -108,13 +112,13 @@ public void run() { return null; } }); - int numHosts = RandomNumbers.randomIntBetween(getRandom(), 2, 5); - httpHosts = new HttpHost[numHosts]; - for (int i = 0; i < numHosts; i++) { - httpHosts[i] = new HttpHost("localhost", 9200 + i); + int numNodes = RandomNumbers.randomIntBetween(getRandom(), 2, 5); + nodes = new Node[numNodes]; + for (int i = 0; i < numNodes; i++) { + nodes[i] = new Node(new HttpHost("localhost", 9200 + i)); } failureListener = new HostsTrackingFailureListener(); - restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, null, failureListener); + restClient = new RestClient(httpClient, 10000, new Header[0], nodes, null, failureListener); } /** @@ -128,9 +132,8 @@ public void shutdownExec() { public void testRoundRobinOkStatusCodes() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); - for (int j = 0; j < httpHosts.length; j++) { + Set hostsSet = hostsSet(); + for (int j = 0; j < 
nodes.length; j++) { int statusCode = randomOkStatusCode(getRandom()); Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode); assertEquals(statusCode, response.getStatusLine().getStatusCode()); @@ -144,9 +147,8 @@ public void testRoundRobinOkStatusCodes() throws IOException { public void testRoundRobinNoRetryErrors() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); - for (int j = 0; j < httpHosts.length; j++) { + Set hostsSet = hostsSet(); + for (int j = 0; j < nodes.length; j++) { String method = randomHttpMethod(getRandom()); int statusCode = randomErrorNoRetryStatusCode(getRandom()); try { @@ -185,10 +187,9 @@ public void testRoundRobinRetryErrors() throws IOException { * the caller. It wraps the exception that contains the failed hosts. */ e = (ResponseException) e.getCause(); - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); + Set hostsSet = hostsSet(); //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each - failureListener.assertCalled(httpHosts); + failureListener.assertCalled(nodes); do { Response response = e.getResponse(); assertEquals(Integer.parseInt(retryEndpoint.substring(1)), response.getStatusLine().getStatusCode()); @@ -210,10 +211,9 @@ public void testRoundRobinRetryErrors() throws IOException { * the caller. It wraps the exception that contains the failed hosts. 
*/ e = (IOException) e.getCause(); - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); + Set hostsSet = hostsSet(); //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each - failureListener.assertCalled(httpHosts); + failureListener.assertCalled(nodes); do { HttpHost httpHost = HttpHost.create(e.getMessage()); assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost)); @@ -232,9 +232,8 @@ public void testRoundRobinRetryErrors() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 2, 5); for (int i = 1; i <= numIters; i++) { //check that one different host is resurrected at each new attempt - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); - for (int j = 0; j < httpHosts.length; j++) { + Set hostsSet = hostsSet(); + for (int j = 0; j < nodes.length; j++) { retryEndpoint = randomErrorRetryEndpoint(); try { restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); @@ -308,6 +307,47 @@ public void testRoundRobinRetryErrors() throws IOException { } } + public void testNodeSelector() throws IOException { + NodeSelector firstPositionOnly = new NodeSelector() { + @Override + public List select(List restClientNodes) { + assertThat(restClientNodes, hasItem(nodes[0])); + return singletonList(nodes[0]); + } + }; + int rounds = between(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. + */ + Request request = new Request("GET", "/200"); + request.setNodeSelector(firstPositionOnly); + Response response = restClient.performRequest(request); + assertEquals(nodes[0].getHost(), response.getHost()); + } + } + + public void testSetNodes() throws IOException { + Node[] newNodes = new Node[nodes.length]; + for (int i = 0; i < nodes.length; i++) { + Roles roles = i == 0 ? 
new Roles(false, true, true) : new Roles(true, false, false); + newNodes[i] = new Node(nodes[i].getHost(), null, null, null, roles); + } + restClient.setNodes(newNodes); + int rounds = between(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. + */ + Request request = new Request("GET", "/200"); + request.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + Response response = restClient.performRequest(request); + assertEquals(newNodes[0].getHost(), response.getHost()); + } + } + private static String randomErrorRetryEndpoint() { switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) { case 0: @@ -321,4 +361,16 @@ private static String randomErrorRetryEndpoint() { } throw new UnsupportedOperationException(); } + + /** + * Build a mutable {@link Set} containing all the {@link Node#getHost() hosts} + * in use by the test. + */ + private Set hostsSet() { + Set hosts = new HashSet<>(); + for (Node node : nodes) { + hosts.add(node.getHost()); + } + return hosts; + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 2d419b213d686..dd934e32ee421 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -96,7 +96,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { private ExecutorService exec = Executors.newFixedThreadPool(1); private RestClient restClient; private Header[] defaultHeaders; - private HttpHost httpHost; + private Node node; private CloseableHttpAsyncClient httpClient; private HostsTrackingFailureListener failureListener; @@ -110,7 +110,7 @@ public void createRestClient() { public Future answer(InvocationOnMock invocationOnMock) throws Throwable { HttpAsyncRequestProducer 
requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; HttpClientContext context = (HttpClientContext) invocationOnMock.getArguments()[2]; - assertThat(context.getAuthCache().get(httpHost), instanceOf(BasicScheme.class)); + assertThat(context.getAuthCache().get(node.getHost()), instanceOf(BasicScheme.class)); final FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest(); @@ -148,9 +148,9 @@ public void run() { }); defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); - httpHost = new HttpHost("localhost", 9200); + node = new Node(new HttpHost("localhost", 9200)); failureListener = new HostsTrackingFailureListener(); - restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener); + restClient = new RestClient(httpClient, 10000, defaultHeaders, new Node[] {node}, null, failureListener); } /** @@ -246,7 +246,7 @@ public void testErrorStatusCodes() throws IOException { if (errorStatusCode <= 500 || expectedIgnores.contains(errorStatusCode)) { failureListener.assertNotCalled(); } else { - failureListener.assertCalled(httpHost); + failureListener.assertCalled(node); } } } @@ -261,14 +261,14 @@ public void testIOExceptions() { } catch(IOException e) { assertThat(e, instanceOf(ConnectTimeoutException.class)); } - failureListener.assertCalled(httpHost); + failureListener.assertCalled(node); try { performRequest(method, "/soe"); fail("request should have failed"); } catch(IOException e) { assertThat(e, instanceOf(SocketTimeoutException.class)); } - failureListener.assertCalled(httpHost); + failureListener.assertCalled(node); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 872b327954b02..a3b295ec7c627 100644 --- 
a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -22,12 +22,20 @@ import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import org.elasticsearch.client.RestClient.NodeTuple; import java.io.IOException; import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; +import static java.util.Collections.singletonList; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; @@ -40,9 +48,9 @@ public class RestClientTests extends RestClientTestCase { public void testCloseIsIdempotent() throws IOException { - HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)}; + Node[] nodes = new Node[] {new Node(new HttpHost("localhost", 9200))}; CloseableHttpAsyncClient closeableHttpAsyncClient = mock(CloseableHttpAsyncClient.class); - RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], hosts, null, null); + RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], nodes, null, null); restClient.close(); verify(closeableHttpAsyncClient, times(1)).close(); restClient.close(); @@ -209,24 +217,24 @@ public void testSetHostsWrongArguments() throws IOException { restClient.setHosts((HttpHost[]) null); fail("setHosts should have failed"); } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); + assertEquals("hosts must not be null or empty", e.getMessage()); } try (RestClient restClient = createRestClient()) { restClient.setHosts(); fail("setHosts should have 
failed"); } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); + assertEquals("hosts must not be null or empty", e.getMessage()); } try (RestClient restClient = createRestClient()) { restClient.setHosts((HttpHost) null); fail("setHosts should have failed"); - } catch (NullPointerException e) { + } catch (IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } try (RestClient restClient = createRestClient()) { restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); fail("setHosts should have failed"); - } catch (NullPointerException e) { + } catch (IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } } @@ -248,8 +256,209 @@ public void testNullPath() throws IOException { } } + public void testSelectHosts() throws IOException { + int iterations = 1000; + Node n1 = new Node(new HttpHost("1"), null, null, "1", null); + Node n2 = new Node(new HttpHost("2"), null, null, "2", null); + Node n3 = new Node(new HttpHost("3"), null, null, "3", null); + List nodes = Arrays.asList(n1, n2, n3); + + NodeSelector not1 = new NodeSelector() { + @Override + public List select(List nodes) { + List result = new ArrayList<>(); + for (Node node : nodes) { + if (false == "1".equals(node.getVersion())) { + result.add(node); + } + } + return result; + } + + @Override + public String toString() { + return "NOT 1"; + } + }; + NodeSelector noNodes = new NodeSelector() { + @Override + public List select(List nodes) { + return Collections.emptyList(); + } + + @Override + public String toString() { + return "NONE"; + } + }; + + NodeTuple> nodeTuple = new NodeTuple<>(nodes, null); + Map blacklist = new HashMap<>(); + AtomicInteger lastNodeIndex = new AtomicInteger(0); + long now = 0; + + // Normal case + List expectedNodes = Arrays.asList(n1, n2, n3); + assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, + lastNodeIndex, now, 
NodeSelector.ANY)); + // Calling it again rotates the set of results + for (int i = 0; i < iterations; i++) { + Collections.rotate(expectedNodes, 1); + assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, + lastNodeIndex, now, NodeSelector.ANY)); + } + + // Exclude some node + lastNodeIndex.set(0); + // h1 excluded + assertEquals(Arrays.asList(n2, n3), RestClient.selectHosts(nodeTuple, blacklist, + lastNodeIndex, now, not1)); + // Calling it again rotates the set of results + assertEquals(Arrays.asList(n3, n2), RestClient.selectHosts(nodeTuple, blacklist, + lastNodeIndex, now, not1)); + // And again, same + assertEquals(Arrays.asList(n2, n3), RestClient.selectHosts(nodeTuple, blacklist, + lastNodeIndex, now, not1)); + /* + * But this time it doesn't because the list being filtered changes + * from (h1, h2, h3) to (h2, h3, h1) which both look the same when + * you filter out h1. + */ + assertEquals(Arrays.asList(n2, n3), RestClient.selectHosts(nodeTuple, blacklist, + lastNodeIndex, now, not1)); + + /* + * Try a NodeSelector that excludes all nodes. This should + * throw an exception + */ + lastNodeIndex.set(0); + try { + RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, now, noNodes); + fail("expected selectHosts to fail"); + } catch (IOException e) { + String message = "NodeSelector [NONE] rejected all nodes, living [" + + "[host=http://1, version=1], [host=http://2, version=2], " + + "[host=http://3, version=3]] and dead []"; + assertEquals(message, e.getMessage()); + } + + /* + * Mark all nodes as dead and look up at a time *after* the + * revival time. This should return all nodes. 
+ */ + blacklist.put(n1.getHost(), new DeadHostState(1)); + blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(1), 1)); + blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(1), 1), 1)); + lastNodeIndex.set(0); + now = DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS + 1; + expectedNodes = Arrays.asList(n1, n2, n3); + assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, + now, NodeSelector.ANY)); + // Calling it again rotates the set of results + for (int i = 0; i < iterations; i++) { + Collections.rotate(expectedNodes, 1); + assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, + lastNodeIndex, now, NodeSelector.ANY)); + } + + /* + * Now try with the nodes dead and *not* past their dead time. + * Only the node closest to revival should come back. + */ + now = 0; + assertEquals(singletonList(n1), RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, + now, NodeSelector.ANY)); + + /* + * Now try with the nodes dead and *not* past their dead time + * *and* a node selector that removes the node that is closest + * to being revived. The second closest node should come back. + */ + assertEquals(singletonList(n2), RestClient.selectHosts(nodeTuple, blacklist, + lastNodeIndex, now, not1)); + + /* + * Try a NodeSelector that excludes all nodes. This should + * return a failure, but a different failure than normal + * because it'll block revival rather than outright reject + * healthy nodes. 
+ */ + lastNodeIndex.set(0); + try { + RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, now, noNodes); + fail("expected selectHosts to fail"); + } catch (IOException e) { + String message = "NodeSelector [NONE] rejected all nodes, living [] and dead [" + + "[host=http://1, version=1], [host=http://2, version=2], " + + "[host=http://3, version=3]]"; + assertEquals(message, e.getMessage()); + } + } + + public void testSetHostsFailures() throws IOException { + RestClient restClient = createRestClient(); + try { + restClient.setHosts((HttpHost[]) null); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null or empty", e.getMessage()); + } + try { + restClient.setHosts(); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null or empty", e.getMessage()); + } + try { + restClient.setHosts((HttpHost) null); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("host cannot be null", e.getMessage()); + } + try { + restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("host cannot be null", e.getMessage()); + } + } + + public void testSetNodesFailures() throws IOException { + RestClient restClient = createRestClient(); + try { + restClient.setNodes((Node[]) null); + fail("setNodes should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + try { + restClient.setNodes(); + fail("setNodes should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + try { + restClient.setNodes((Node) null); + fail("setNodes should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("node cannot be null", 
e.getMessage()); + } + try { + restClient.setNodes( + new Node(new HttpHost("localhost", 9200)), + null, + new Node(new HttpHost("localhost", 9201))); + fail("setNodes should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("node cannot be null", e.getMessage()); + } + } + private static RestClient createRestClient() { - HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)}; - return new RestClient(mock(CloseableHttpAsyncClient.class), randomIntBetween(1_000, 30_000), new Header[]{}, hosts, null, null); + Node[] nodes = new Node[] {new Node(new HttpHost("localhost", 9200))}; + return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000), + new Header[] {}, nodes, null, null); } + + } diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 5ee97399b34e6..65259cd5e4d78 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -39,6 +39,8 @@ import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseListener; @@ -107,7 +109,7 @@ public void testUsage() throws IOException, InterruptedException { RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http")); builder.setFailureListener(new RestClient.FailureListener() { @Override - public void onFailure(HttpHost host) { + public void onFailure(Node node) { // <1> } }); @@ -184,6 +186,10 @@ public void onFailure(Exception exception) { 
request.setHttpAsyncResponseConsumerFactory( new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024)); //end::rest-client-response-consumer + //tag::rest-client-node-selector + // TODO link me to docs + request.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + //end::rest-client-node-selector } { HttpEntity[] documents = new HttpEntity[10]; diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchHostsSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java similarity index 50% rename from client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchHostsSniffer.java rename to client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java index 34a4988358653..d5e3b9112f599 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchHostsSniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java @@ -26,31 +26,36 @@ import org.apache.commons.logging.LogFactory; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.Node.Roles; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.TimeUnit; /** * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back. - * Compatible with elasticsearch 5.x and 2.x. + * Compatible with elasticsearch 2.x+. 
*/ -public final class ElasticsearchHostsSniffer implements HostsSniffer { +public final class ElasticsearchNodesSniffer implements NodesSniffer { - private static final Log logger = LogFactory.getLog(ElasticsearchHostsSniffer.class); + private static final Log logger = LogFactory.getLog(ElasticsearchNodesSniffer.class); public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1); private final RestClient restClient; - private final Map sniffRequestParams; + private final Request request; private final Scheme scheme; private final JsonFactory jsonFactory = new JsonFactory(); @@ -62,8 +67,8 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer { * that is also provided to {@link Sniffer#builder(RestClient)}, so that the hosts are set to the same * client that was used to fetch them. */ - public ElasticsearchHostsSniffer(RestClient restClient) { - this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchHostsSniffer.Scheme.HTTP); + public ElasticsearchNodesSniffer(RestClient restClient) { + this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchNodesSniffer.Scheme.HTTP); } /** @@ -77,30 +82,32 @@ public ElasticsearchHostsSniffer(RestClient restClient) { * that have responded within this timeout will be returned. 
* @param scheme the scheme to associate sniffed nodes with (as it is not returned by elasticsearch) */ - public ElasticsearchHostsSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) { + public ElasticsearchNodesSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) { this.restClient = Objects.requireNonNull(restClient, "restClient cannot be null"); if (sniffRequestTimeoutMillis < 0) { throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0"); } - this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeoutMillis + "ms"); + this.request = new Request("GET", "/_nodes/http"); + request.addParameter("timeout", sniffRequestTimeoutMillis + "ms"); this.scheme = Objects.requireNonNull(scheme, "scheme cannot be null"); } /** * Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts */ - public List sniffHosts() throws IOException { - Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams); - return readHosts(response.getEntity()); + @Override + public List sniff() throws IOException { + Response response = restClient.performRequest(request); + return readHosts(response.getEntity(), scheme, jsonFactory); } - private List readHosts(HttpEntity entity) throws IOException { + static List readHosts(HttpEntity entity, Scheme scheme, JsonFactory jsonFactory) throws IOException { try (InputStream inputStream = entity.getContent()) { JsonParser parser = jsonFactory.createParser(inputStream); if (parser.nextToken() != JsonToken.START_OBJECT) { throw new IOException("expected data to start with an object"); } - List hosts = new ArrayList<>(); + List nodes = new ArrayList<>(); while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.START_OBJECT) { if ("nodes".equals(parser.getCurrentName())) { @@ -108,10 +115,9 @@ private List readHosts(HttpEntity entity) throws IOException { 
JsonToken token = parser.nextToken(); assert token == JsonToken.START_OBJECT; String nodeId = parser.getCurrentName(); - HttpHost sniffedHost = readHost(nodeId, parser, this.scheme); - if (sniffedHost != null) { - logger.trace("adding node [" + nodeId + "]"); - hosts.add(sniffedHost); + Node node = readNode(nodeId, parser, scheme); + if (node != null) { + nodes.add(node); } } } else { @@ -119,13 +125,31 @@ private List readHosts(HttpEntity entity) throws IOException { } } } - return hosts; + return nodes; } } - private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException { - HttpHost httpHost = null; + private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) throws IOException { + HttpHost publishedHost = null; + /* + * We sniff the bound hosts so we can look up the node based on any + * address on which it is listening. This is useful in Elasticsearch's + * test framework where we sometimes publish ipv6 addresses but the + * tests contact the node on ipv4. 
+ */ + Set boundHosts = new HashSet<>(); + String name = null; + String version = null; String fieldName = null; + // Used to read roles from 5.0+ + boolean sawRoles = false; + boolean master = false; + boolean data = false; + boolean ingest = false; + // Used to read roles from 2.x + Boolean masterAttribute = null; + Boolean dataAttribute = null; + boolean clientAttribute = false; while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.FIELD_NAME) { fieldName = parser.getCurrentName(); @@ -133,9 +157,27 @@ private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme if ("http".equals(fieldName)) { while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) { - URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); - httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), - boundAddressAsURI.getScheme()); + URI publishAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); + publishedHost = new HttpHost(publishAddressAsURI.getHost(), publishAddressAsURI.getPort(), + publishAddressAsURI.getScheme()); + } else if (parser.currentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) { + while (parser.nextToken() != JsonToken.END_ARRAY) { + URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); + boundHosts.add(new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), + boundAddressAsURI.getScheme())); + } + } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + parser.skipChildren(); + } + } + } else if ("attributes".equals(fieldName)) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "master".equals(parser.getCurrentName())) { + masterAttribute = toBoolean(parser.getValueAsString()); + } else if 
(parser.getCurrentToken() == JsonToken.VALUE_STRING && "data".equals(parser.getCurrentName())) { + dataAttribute = toBoolean(parser.getValueAsString()); + } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "client".equals(parser.getCurrentName())) { + clientAttribute = toBoolean(parser.getValueAsString()); } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { parser.skipChildren(); } @@ -143,14 +185,55 @@ private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme } else { parser.skipChildren(); } + } else if (parser.currentToken() == JsonToken.START_ARRAY) { + if ("roles".equals(fieldName)) { + sawRoles = true; + while (parser.nextToken() != JsonToken.END_ARRAY) { + switch (parser.getText()) { + case "master": + master = true; + break; + case "data": + data = true; + break; + case "ingest": + ingest = true; + break; + default: + logger.warn("unknown role [" + parser.getText() + "] on node [" + nodeId + "]"); + } + } + } else { + parser.skipChildren(); + } + } else if (parser.currentToken().isScalarValue()) { + if ("version".equals(fieldName)) { + version = parser.getText(); + } else if ("name".equals(fieldName)) { + name = parser.getText(); + } } } //http section is not present if http is not enabled on the node, ignore such nodes - if (httpHost == null) { + if (publishedHost == null) { logger.debug("skipping node [" + nodeId + "] with http disabled"); return null; + } else { + logger.trace("adding node [" + nodeId + "]"); + if (version.startsWith("2.")) { + /* + * 2.x doesn't send roles, instead we try to read them from + * attributes. + */ + master = masterAttribute == null ? false == clientAttribute : masterAttribute; + data = dataAttribute == null ? false == clientAttribute : dataAttribute; + } else { + assert sawRoles : "didn't see roles for [" + nodeId + "]"; + } + assert boundHosts.contains(publishedHost) : + "[" + nodeId + "] doesn't make sense! 
publishedHost should be in boundHosts"; + return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest)); } - return httpHost; } public enum Scheme { @@ -167,4 +250,15 @@ public String toString() { return name; } } + + private static boolean toBoolean(String string) { + switch (string) { + case "true": + return true; + case "false": + return false; + default: + throw new IllegalArgumentException("[" + string + "] is not a valid boolean"); + } + } } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/NodesSniffer.java similarity index 85% rename from client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java rename to client/sniffer/src/main/java/org/elasticsearch/client/sniff/NodesSniffer.java index 9eb7b34425944..c22c18f6eae32 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/NodesSniffer.java @@ -19,7 +19,7 @@ package org.elasticsearch.client.sniff; -import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import java.io.IOException; import java.util.List; @@ -27,9 +27,9 @@ /** * Responsible for sniffing the http hosts */ -public interface HostsSniffer { +public interface NodesSniffer { /** - * Returns the sniffed http hosts + * Returns the sniffed Elasticsearch nodes. 
*/ - List sniffHosts() throws IOException; + List sniff() throws IOException; } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java index cbc77351de98b..69095641644bb 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java @@ -20,6 +20,7 @@ package org.elasticsearch.client.sniff; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import java.util.Objects; @@ -54,11 +55,11 @@ public void setSniffer(Sniffer sniffer) { } @Override - public void onFailure(HttpHost host) { + public void onFailure(Node node) { if (sniffer == null) { throw new IllegalStateException("sniffer was not set, unable to sniff on failure"); } //re-sniff immediately but take out the node that failed - sniffer.sniffOnFailure(host); + sniffer.sniffOnFailure(node); } } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index c655babd9ed3d..bc2417cb8d8c8 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -22,6 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; @@ -41,7 +42,7 @@ /** * Class responsible for sniffing nodes from some source (default is elasticsearch itself) and setting them to a provided instance of * {@link RestClient}. 
Must be created via {@link SnifferBuilder}, which allows to set all of the different options or rely on defaults. - * A background task fetches the nodes through the {@link HostsSniffer} and sets them to the {@link RestClient} instance. + * A background task fetches the nodes through the {@link NodesSniffer} and sets them to the {@link RestClient} instance. * It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to * {@link RestClientBuilder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation needs to be lazily set to the * previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}. @@ -53,15 +54,15 @@ public class Sniffer implements Closeable { private final Task task; - Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) { - this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay); + Sniffer(RestClient restClient, NodesSniffer nodesSniffer, long sniffInterval, long sniffAfterFailureDelay) { + this.task = new Task(nodesSniffer, restClient, sniffInterval, sniffAfterFailureDelay); } /** * Triggers a new sniffing round and explicitly takes out the failed host provided as argument */ - public void sniffOnFailure(HttpHost failedHost) { - this.task.sniffOnFailure(failedHost); + public void sniffOnFailure(Node failedNode) { + this.task.sniffOnFailure(failedNode); } @Override @@ -70,7 +71,7 @@ public void close() throws IOException { } private static class Task implements Runnable { - private final HostsSniffer hostsSniffer; + private final NodesSniffer nodesSniffer; private final RestClient restClient; private final long sniffIntervalMillis; @@ -79,8 +80,8 @@ private static class Task implements Runnable { private final AtomicBoolean running = new AtomicBoolean(false); private ScheduledFuture scheduledFuture; - private Task(HostsSniffer hostsSniffer, 
RestClient restClient, long sniffIntervalMillis, long sniffAfterFailureDelayMillis) { - this.hostsSniffer = hostsSniffer; + private Task(NodesSniffer nodesSniffer, RestClient restClient, long sniffIntervalMillis, long sniffAfterFailureDelayMillis) { + this.nodesSniffer = nodesSniffer; this.restClient = restClient; this.sniffIntervalMillis = sniffIntervalMillis; this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis; @@ -109,22 +110,24 @@ public void run() { sniff(null, sniffIntervalMillis); } - void sniffOnFailure(HttpHost failedHost) { - sniff(failedHost, sniffAfterFailureDelayMillis); + void sniffOnFailure(Node failedNode) { + sniff(failedNode, sniffAfterFailureDelayMillis); } - void sniff(HttpHost excludeHost, long nextSniffDelayMillis) { + void sniff(Node excludeNode, long nextSniffDelayMillis) { if (running.compareAndSet(false, true)) { try { - List sniffedHosts = hostsSniffer.sniffHosts(); - logger.debug("sniffed hosts: " + sniffedHosts); - if (excludeHost != null) { - sniffedHosts.remove(excludeHost); + final List sniffedNodes = nodesSniffer.sniff(); + if (logger.isDebugEnabled()) { + logger.debug("sniffed hosts: " + sniffedNodes); } - if (sniffedHosts.isEmpty()) { + if (excludeNode != null) { + sniffedNodes.remove(excludeNode); + } + if (sniffedNodes.isEmpty()) { logger.warn("no hosts to set, hosts will be updated at the next sniffing round"); } else { - this.restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()])); + this.restClient.setNodes(sniffedNodes.toArray(new Node[sniffedNodes.size()])); } } catch (Exception e) { logger.error("error while sniffing nodes", e); diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java index 010a8a4a78d20..48ca52d423012 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java +++ 
b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java @@ -34,7 +34,7 @@ public final class SnifferBuilder { private final RestClient restClient; private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL; private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; - private HostsSniffer hostsSniffer; + private NodesSniffer nodesSniffer; /** * Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch @@ -69,13 +69,13 @@ public SnifferBuilder setSniffAfterFailureDelayMillis(int sniffAfterFailureDelay } /** - * Sets the {@link HostsSniffer} to be used to read hosts. A default instance of {@link ElasticsearchHostsSniffer} - * is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchHostsSniffer}, + * Sets the {@link NodesSniffer} to be used to read hosts. A default instance of {@link ElasticsearchNodesSniffer} + * is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchNodesSniffer}, * or to provide a different implementation (e.g. in case hosts need to taken from a different source). */ - public SnifferBuilder setHostsSniffer(HostsSniffer hostsSniffer) { - Objects.requireNonNull(hostsSniffer, "hostsSniffer cannot be null"); - this.hostsSniffer = hostsSniffer; + public SnifferBuilder setNodesSniffer(NodesSniffer nodesSniffer) { + Objects.requireNonNull(nodesSniffer, "nodesSniffer cannot be null"); + this.nodesSniffer = nodesSniffer; return this; } @@ -83,9 +83,9 @@ public SnifferBuilder setHostsSniffer(HostsSniffer hostsSniffer) { * Creates the {@link Sniffer} based on the provided configuration. 
*/ public Sniffer build() { - if (hostsSniffer == null) { - this.hostsSniffer = new ElasticsearchHostsSniffer(restClient); + if (nodesSniffer == null) { + this.nodesSniffer = new ElasticsearchNodesSniffer(restClient); } - return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis); + return new Sniffer(restClient, nodesSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java new file mode 100644 index 0000000000000..712a836a17b8a --- /dev/null +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.sniff; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.InputStreamEntity; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.RestClientTestCase; +import org.elasticsearch.client.Node.Roles; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer.Scheme; + +import java.io.IOException; +import java.io.InputStream; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.fasterxml.jackson.core.JsonFactory; + +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.junit.Assert.assertThat; + +/** + * Test parsing the response from the {@code /_nodes/http} API from fixed + * versions of Elasticsearch. + */ +public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase { + private void checkFile(String file, Node... expected) throws IOException { + InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream(file); + if (in == null) { + throw new IllegalArgumentException("Couldn't find [" + file + "]"); + } + try { + HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON); + List nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory()); + // Use these assertions because the error messages are nicer than hasItems. 
+ assertThat(nodes, hasSize(expected.length)); + for (Node expectedNode : expected) { + assertThat(nodes, hasItem(expectedNode)); + } + } finally { + in.close(); + } + } + + public void test2x() throws IOException { + checkFile("2.0.0_nodes_http.json", + node(9200, "m1", "2.0.0", true, false, false), + node(9202, "m2", "2.0.0", true, true, false), + node(9201, "m3", "2.0.0", true, false, false), + node(9205, "d1", "2.0.0", false, true, false), + node(9204, "d2", "2.0.0", false, true, false), + node(9203, "d3", "2.0.0", false, true, false), + node(9207, "c1", "2.0.0", false, false, false), + node(9206, "c2", "2.0.0", false, false, false)); + } + + public void test5x() throws IOException { + checkFile("5.0.0_nodes_http.json", + node(9200, "m1", "5.0.0", true, false, true), + node(9201, "m2", "5.0.0", true, true, true), + node(9202, "m3", "5.0.0", true, false, true), + node(9203, "d1", "5.0.0", false, true, true), + node(9204, "d2", "5.0.0", false, true, true), + node(9205, "d3", "5.0.0", false, true, true), + node(9206, "c1", "5.0.0", false, false, true), + node(9207, "c2", "5.0.0", false, false, true)); + } + + public void test6x() throws IOException { + checkFile("6.0.0_nodes_http.json", + node(9200, "m1", "6.0.0", true, false, true), + node(9201, "m2", "6.0.0", true, true, true), + node(9202, "m3", "6.0.0", true, false, true), + node(9203, "d1", "6.0.0", false, true, true), + node(9204, "d2", "6.0.0", false, true, true), + node(9205, "d3", "6.0.0", false, true, true), + node(9206, "c1", "6.0.0", false, false, true), + node(9207, "c2", "6.0.0", false, false, true)); + } + + private Node node(int port, String name, String version, boolean master, boolean data, boolean ingest) { + HttpHost host = new HttpHost("127.0.0.1", port); + Set boundHosts = new HashSet<>(2); + boundHosts.add(host); + boundHosts.add(new HttpHost("[::1]", port)); + return new Node(host, boundHosts, name, version, new Roles(master, data, ingest)); + } +} diff --git 
a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java similarity index 76% rename from client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java rename to client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java index f13d175110434..d5492babefb7c 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java @@ -31,6 +31,7 @@ import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; +import org.elasticsearch.client.Node; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -45,10 +46,10 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -60,17 +61,17 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; -public class ElasticsearchHostsSnifferTests extends RestClientTestCase { +public class ElasticsearchNodesSnifferTests extends RestClientTestCase { private int sniffRequestTimeout; - private ElasticsearchHostsSniffer.Scheme scheme; + private ElasticsearchNodesSniffer.Scheme scheme; private SniffResponse sniffResponse; private HttpServer httpServer; @Before public void startHttpServer() throws IOException { this.sniffRequestTimeout = RandomNumbers.randomIntBetween(getRandom(), 1000, 10000); - this.scheme = RandomPicks.randomFrom(getRandom(), 
ElasticsearchHostsSniffer.Scheme.values()); + this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchNodesSniffer.Scheme.values()); if (rarely()) { this.sniffResponse = SniffResponse.buildFailure(); } else { @@ -87,7 +88,7 @@ public void stopHttpServer() throws IOException { public void testConstructorValidation() throws IOException { try { - new ElasticsearchHostsSniffer(null, 1, ElasticsearchHostsSniffer.Scheme.HTTP); + new ElasticsearchNodesSniffer(null, 1, ElasticsearchNodesSniffer.Scheme.HTTP); fail("should have failed"); } catch(NullPointerException e) { assertEquals("restClient cannot be null", e.getMessage()); @@ -95,14 +96,14 @@ public void testConstructorValidation() throws IOException { HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); try (RestClient restClient = RestClient.builder(httpHost).build()) { try { - new ElasticsearchHostsSniffer(restClient, 1, null); + new ElasticsearchNodesSniffer(restClient, 1, null); fail("should have failed"); } catch (NullPointerException e) { assertEquals(e.getMessage(), "scheme cannot be null"); } try { - new ElasticsearchHostsSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0), - ElasticsearchHostsSniffer.Scheme.HTTP); + new ElasticsearchNodesSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0), + ElasticsearchNodesSniffer.Scheme.HTTP); fail("should have failed"); } catch (IllegalArgumentException e) { assertEquals(e.getMessage(), "sniffRequestTimeoutMillis must be greater than 0"); @@ -113,17 +114,13 @@ public void testConstructorValidation() throws IOException { public void testSniffNodes() throws IOException { HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); try (RestClient restClient = RestClient.builder(httpHost).build()) { - ElasticsearchHostsSniffer sniffer = new ElasticsearchHostsSniffer(restClient, 
sniffRequestTimeout, scheme); + ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer(restClient, sniffRequestTimeout, scheme); try { - List sniffedHosts = sniffer.sniffHosts(); + List sniffedNodes = sniffer.sniff(); if (sniffResponse.isFailure) { fail("sniffNodes should have failed"); } - assertThat(sniffedHosts.size(), equalTo(sniffResponse.hosts.size())); - Iterator responseHostsIterator = sniffResponse.hosts.iterator(); - for (HttpHost sniffedHost : sniffedHosts) { - assertEquals(sniffedHost, responseHostsIterator.next()); - } + assertEquals(sniffResponse.result, sniffedNodes); } catch(ResponseException e) { Response response = e.getResponse(); if (sniffResponse.isFailure) { @@ -176,9 +173,9 @@ public void handle(HttpExchange httpExchange) throws IOException { } } - private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme scheme) throws IOException { + private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme scheme) throws IOException { int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5); - List hosts = new ArrayList<>(numNodes); + List nodes = new ArrayList<>(numNodes); JsonFactory jsonFactory = new JsonFactory(); StringWriter writer = new StringWriter(); JsonGenerator generator = jsonFactory.createGenerator(writer); @@ -193,6 +190,23 @@ private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme generator.writeObjectFieldStart("nodes"); for (int i = 0; i < numNodes; i++) { String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10); + String host = "host" + i; + int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); + HttpHost publishHost = new HttpHost(host, port, scheme.toString()); + Set boundHosts = new HashSet<>(); + boundHosts.add(publishHost); + + if (randomBoolean()) { + int bound = between(1, 5); + for (int b = 0; b < bound; b++) { + boundHosts.add(new HttpHost(host + b, port, scheme.toString())); + } + } + + Node node = new 
Node(publishHost, boundHosts, randomAsciiAlphanumOfLength(5), + randomAsciiAlphanumOfLength(5), + new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean())); + generator.writeObjectFieldStart(nodeId); if (getRandom().nextBoolean()) { generator.writeObjectFieldStart("bogus_object"); @@ -206,44 +220,45 @@ private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme } boolean isHttpEnabled = rarely() == false; if (isHttpEnabled) { - String host = "host" + i; - int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); - HttpHost httpHost = new HttpHost(host, port, scheme.toString()); - hosts.add(httpHost); + nodes.add(node); generator.writeObjectFieldStart("http"); - if (getRandom().nextBoolean()) { - generator.writeArrayFieldStart("bound_address"); - generator.writeString("[fe80::1]:" + port); - generator.writeString("[::1]:" + port); - generator.writeString("127.0.0.1:" + port); - generator.writeEndArray(); + generator.writeArrayFieldStart("bound_address"); + for (HttpHost bound : boundHosts) { + generator.writeString(bound.toHostString()); } + generator.writeEndArray(); if (getRandom().nextBoolean()) { generator.writeObjectFieldStart("bogus_object"); generator.writeEndObject(); } - generator.writeStringField("publish_address", httpHost.toHostString()); + generator.writeStringField("publish_address", publishHost.toHostString()); if (getRandom().nextBoolean()) { generator.writeNumberField("max_content_length_in_bytes", 104857600); } generator.writeEndObject(); } - if (getRandom().nextBoolean()) { - String[] roles = {"master", "data", "ingest"}; - int numRoles = RandomNumbers.randomIntBetween(getRandom(), 0, 3); - Set nodeRoles = new HashSet<>(numRoles); - for (int j = 0; j < numRoles; j++) { - String role; - do { - role = RandomPicks.randomFrom(getRandom(), roles); - } while(nodeRoles.add(role) == false); + + List roles = Arrays.asList(new String[] {"master", "data", "ingest"}); + Collections.shuffle(roles, getRandom()); + 
generator.writeArrayFieldStart("roles"); + for (String role : roles) { + if ("master".equals(role) && node.getRoles().isMasterEligible()) { + generator.writeString("master"); } - generator.writeArrayFieldStart("roles"); - for (String nodeRole : nodeRoles) { - generator.writeString(nodeRole); + if ("data".equals(role) && node.getRoles().isData()) { + generator.writeString("data"); + } + if ("ingest".equals(role) && node.getRoles().isIngest()) { + generator.writeString("ingest"); } - generator.writeEndArray(); } + generator.writeEndArray(); + + generator.writeFieldName("version"); + generator.writeString(node.getVersion()); + generator.writeFieldName("name"); + generator.writeString(node.getName()); + int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3); Map attributes = new HashMap<>(numAttributes); for (int j = 0; j < numAttributes; j++) { @@ -263,18 +278,18 @@ private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme generator.writeEndObject(); generator.writeEndObject(); generator.close(); - return SniffResponse.buildResponse(writer.toString(), hosts); + return SniffResponse.buildResponse(writer.toString(), nodes); } private static class SniffResponse { private final String nodesInfoBody; private final int nodesInfoResponseCode; - private final List hosts; + private final List result; private final boolean isFailure; - SniffResponse(String nodesInfoBody, List hosts, boolean isFailure) { + SniffResponse(String nodesInfoBody, List result, boolean isFailure) { this.nodesInfoBody = nodesInfoBody; - this.hosts = hosts; + this.result = result; this.isFailure = isFailure; if (isFailure) { this.nodesInfoResponseCode = randomErrorResponseCode(); @@ -284,11 +299,11 @@ private static class SniffResponse { } static SniffResponse buildFailure() { - return new SniffResponse("", Collections.emptyList(), true); + return new SniffResponse("", Collections.emptyList(), true); } - static SniffResponse buildResponse(String nodesInfoBody, List 
hosts) { - return new SniffResponse(nodesInfoBody, hosts, false); + static SniffResponse buildResponse(String nodesInfoBody, List nodes) { + return new SniffResponse(nodesInfoBody, nodes, false); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockNodesSniffer.java similarity index 78% rename from client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java rename to client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockNodesSniffer.java index 5a52151d76e01..136cbc5cf5d62 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockNodesSniffer.java @@ -20,17 +20,18 @@ package org.elasticsearch.client.sniff; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import java.io.IOException; import java.util.Collections; import java.util.List; /** - * Mock implementation of {@link HostsSniffer}. Useful to prevent any connection attempt while testing builders etc. + * Mock implementation of {@link NodesSniffer}. Useful to prevent any connection attempt while testing builders etc. 
*/ -class MockHostsSniffer implements HostsSniffer { +class MockNodesSniffer implements NodesSniffer { @Override - public List sniffHosts() throws IOException { - return Collections.singletonList(new HttpHost("localhost", 9200)); + public List sniff() throws IOException { + return Collections.singletonList(new Node(new HttpHost("localhost", 9200))); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java index 1fece270ffe0d..225bdb9a0097e 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.client.sniff; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientTestCase; @@ -46,7 +47,7 @@ public void testSetSniffer() throws Exception { } try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) { - try (Sniffer sniffer = Sniffer.builder(restClient).setHostsSniffer(new MockHostsSniffer()).build()) { + try (Sniffer sniffer = Sniffer.builder(restClient).setNodesSniffer(new MockNodesSniffer()).build()) { listener.setSniffer(sniffer); try { listener.setSniffer(sniffer); @@ -54,7 +55,7 @@ public void testSetSniffer() throws Exception { } catch(IllegalStateException e) { assertEquals("sniffer can only be set once", e.getMessage()); } - listener.onFailure(new HttpHost("localhost", 9200)); + listener.onFailure(new Node(new HttpHost("localhost", 9200))); } } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index 9a7359e9c7215..f924a9fbebc81 100644 --- 
a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -61,10 +61,10 @@ public void testBuild() throws Exception { try { - Sniffer.builder(client).setHostsSniffer(null); + Sniffer.builder(client).setNodesSniffer(null); fail("should have failed"); } catch(NullPointerException e) { - assertEquals("hostsSniffer cannot be null", e.getMessage()); + assertEquals("nodesSniffer cannot be null", e.getMessage()); } @@ -80,7 +80,7 @@ public void testBuild() throws Exception { builder.setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); } if (getRandom().nextBoolean()) { - builder.setHostsSniffer(new MockHostsSniffer()); + builder.setNodesSniffer(new MockNodesSniffer()); } try (Sniffer sniffer = builder.build()) { diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java index 199632d478f81..5f305024dba20 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java @@ -20,9 +20,10 @@ package org.elasticsearch.client.sniff.documentation; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer; -import org.elasticsearch.client.sniff.HostsSniffer; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; +import org.elasticsearch.client.sniff.NodesSniffer; import org.elasticsearch.client.sniff.SniffOnFailureListener; import org.elasticsearch.client.sniff.Sniffer; @@ -91,12 +92,12 @@ public void testUsage() throws IOException { RestClient restClient = RestClient.builder( 
new HttpHost("localhost", 9200, "http")) .build(); - HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer( + NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer( restClient, - ElasticsearchHostsSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, - ElasticsearchHostsSniffer.Scheme.HTTPS); + ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, + ElasticsearchNodesSniffer.Scheme.HTTPS); Sniffer sniffer = Sniffer.builder(restClient) - .setHostsSniffer(hostsSniffer).build(); + .setNodesSniffer(nodesSniffer).build(); //end::sniffer-https } { @@ -104,28 +105,28 @@ public void testUsage() throws IOException { RestClient restClient = RestClient.builder( new HttpHost("localhost", 9200, "http")) .build(); - HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer( + NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer( restClient, TimeUnit.SECONDS.toMillis(5), - ElasticsearchHostsSniffer.Scheme.HTTP); + ElasticsearchNodesSniffer.Scheme.HTTP); Sniffer sniffer = Sniffer.builder(restClient) - .setHostsSniffer(hostsSniffer).build(); + .setNodesSniffer(nodesSniffer).build(); //end::sniff-request-timeout } { - //tag::custom-hosts-sniffer + //tag::custom-nodes-sniffer RestClient restClient = RestClient.builder( new HttpHost("localhost", 9200, "http")) .build(); - HostsSniffer hostsSniffer = new HostsSniffer() { + NodesSniffer nodesSniffer = new NodesSniffer() { @Override - public List sniffHosts() throws IOException { + public List sniff() throws IOException { return null; // <1> } }; Sniffer sniffer = Sniffer.builder(restClient) - .setHostsSniffer(hostsSniffer).build(); - //end::custom-hosts-sniffer + .setNodesSniffer(nodesSniffer).build(); + //end::custom-nodes-sniffer } } } diff --git a/client/sniffer/src/test/resources/2.0.0_nodes_http.json b/client/sniffer/src/test/resources/2.0.0_nodes_http.json new file mode 100644 index 0000000000000..b370e78e16011 --- /dev/null +++ b/client/sniffer/src/test/resources/2.0.0_nodes_http.json @@ -0,0 +1,141 @@ +{ + "cluster_name" : 
"elasticsearch", + "nodes" : { + "qYUZ_8bTRwODPxukDlFw6Q" : { + "name" : "d2", + "transport_address" : "127.0.0.1:9304", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9204", + "attributes" : { + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9204", "[::1]:9204" ], + "publish_address" : "127.0.0.1:9204", + "max_content_length_in_bytes" : 104857600 + } + }, + "Yej5UVNgR2KgBjUFHOQpCw" : { + "name" : "c1", + "transport_address" : "127.0.0.1:9307", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9207", + "attributes" : { + "data" : "false", + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9207", "[::1]:9207" ], + "publish_address" : "127.0.0.1:9207", + "max_content_length_in_bytes" : 104857600 + } + }, + "mHttJwhwReangKEx9EGuAg" : { + "name" : "m3", + "transport_address" : "127.0.0.1:9301", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9201", + "attributes" : { + "data" : "false", + "master" : "true" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9201", "[::1]:9201" ], + "publish_address" : "127.0.0.1:9201", + "max_content_length_in_bytes" : 104857600 + } + }, + "6Erdptt_QRGLxMiLi9mTkg" : { + "name" : "c2", + "transport_address" : "127.0.0.1:9306", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9206", + "attributes" : { + "data" : "false", + "client" : "true" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9206", "[::1]:9206" ], + "publish_address" : "127.0.0.1:9206", + "max_content_length_in_bytes" : 104857600 + } + }, + "mLRCZBypTiys6e8KY5DMnA" : { + "name" : "m1", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : 
"127.0.0.1:9200", + "attributes" : { + "data" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9200", "[::1]:9200" ], + "publish_address" : "127.0.0.1:9200", + "max_content_length_in_bytes" : 104857600 + } + }, + "pVqOhytXQwetsZVzCBppYw" : { + "name" : "m2", + "transport_address" : "127.0.0.1:9302", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9202", + "http" : { + "bound_address" : [ "127.0.0.1:9202", "[::1]:9202" ], + "publish_address" : "127.0.0.1:9202", + "max_content_length_in_bytes" : 104857600 + } + }, + "ARyzVfpJSw2a9TOIUpbsBA" : { + "name" : "d1", + "transport_address" : "127.0.0.1:9305", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9205", + "attributes" : { + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9205", "[::1]:9205" ], + "publish_address" : "127.0.0.1:9205", + "max_content_length_in_bytes" : 104857600 + } + }, + "2Hpid-g5Sc2BKCevhN6VQw" : { + "name" : "d3", + "transport_address" : "127.0.0.1:9303", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9203", + "attributes" : { + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9203", "[::1]:9203" ], + "publish_address" : "127.0.0.1:9203", + "max_content_length_in_bytes" : 104857600 + } + } + } +} diff --git a/client/sniffer/src/test/resources/5.0.0_nodes_http.json b/client/sniffer/src/test/resources/5.0.0_nodes_http.json new file mode 100644 index 0000000000000..7a7d143ecaf43 --- /dev/null +++ b/client/sniffer/src/test/resources/5.0.0_nodes_http.json @@ -0,0 +1,169 @@ +{ + "_nodes" : { + "total" : 8, + "successful" : 8, + "failed" : 0 + }, + "cluster_name" : "test", + "nodes" : { + "DXz_rhcdSF2xJ96qyjaLVw" : { + "name" : "m1", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + 
"version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9200", + "127.0.0.1:9200" + ], + "publish_address" : "127.0.0.1:9200", + "max_content_length_in_bytes" : 104857600 + } + }, + "53Mi6jYdRgeR1cdyuoNfQQ" : { + "name" : "m2", + "transport_address" : "127.0.0.1:9301", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "master", + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9201", + "127.0.0.1:9201" + ], + "publish_address" : "127.0.0.1:9201", + "max_content_length_in_bytes" : 104857600 + } + }, + "XBIghcHiRlWP9c4vY6rETw" : { + "name" : "c2", + "transport_address" : "127.0.0.1:9307", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address" : "127.0.0.1:9207", + "max_content_length_in_bytes" : 104857600 + } + }, + "cFM30FlyS8K1njH_bovwwQ" : { + "name" : "d1", + "transport_address" : "127.0.0.1:9303", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address" : "127.0.0.1:9203", + "max_content_length_in_bytes" : 104857600 + } + }, + "eoVUVRGNRDyyOapqIcrsIA" : { + "name" : "d2", + "transport_address" : "127.0.0.1:9304", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address" : "127.0.0.1:9204", + "max_content_length_in_bytes" : 104857600 + } + }, + "xPN76uDcTP-DyXaRzPg2NQ" : { + "name" : "c1", + "transport_address" : "127.0.0.1:9306", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", 
+ "build_hash" : "253032b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address" : "127.0.0.1:9206", + "max_content_length_in_bytes" : 104857600 + } + }, + "RY0oW2d7TISEqazk-U4Kcw" : { + "name" : "d3", + "transport_address" : "127.0.0.1:9305", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address" : "127.0.0.1:9205", + "max_content_length_in_bytes" : 104857600 + } + }, + "tU0rXEZmQ9GsWfn2TQ4kow" : { + "name" : "m3", + "transport_address" : "127.0.0.1:9302", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9202", + "127.0.0.1:9202" + ], + "publish_address" : "127.0.0.1:9202", + "max_content_length_in_bytes" : 104857600 + } + } + } +} diff --git a/client/sniffer/src/test/resources/6.0.0_nodes_http.json b/client/sniffer/src/test/resources/6.0.0_nodes_http.json new file mode 100644 index 0000000000000..5a8905da64c89 --- /dev/null +++ b/client/sniffer/src/test/resources/6.0.0_nodes_http.json @@ -0,0 +1,169 @@ +{ + "_nodes" : { + "total" : 8, + "successful" : 8, + "failed" : 0 + }, + "cluster_name" : "test", + "nodes" : { + "FX9npqGQSL2mOGF8Zkf3hw" : { + "name" : "m2", + "transport_address" : "127.0.0.1:9301", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "master", + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9201", + "127.0.0.1:9201" + ], + "publish_address" : "127.0.0.1:9201", + "max_content_length_in_bytes" : 104857600 + } + }, + "jmUqzYLGTbWCg127kve3Tg" : { + "name" : "d1", + "transport_address" : "127.0.0.1:9303", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : 
"8f0685b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address" : "127.0.0.1:9203", + "max_content_length_in_bytes" : 104857600 + } + }, + "soBU6bzvTOqdLxPstSbJ2g" : { + "name" : "d3", + "transport_address" : "127.0.0.1:9305", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address" : "127.0.0.1:9205", + "max_content_length_in_bytes" : 104857600 + } + }, + "mtYDAhURTP6twdmNAkMnOg" : { + "name" : "m3", + "transport_address" : "127.0.0.1:9302", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9202", + "127.0.0.1:9202" + ], + "publish_address" : "127.0.0.1:9202", + "max_content_length_in_bytes" : 104857600 + } + }, + "URxHiUQPROOt1G22Ev6lXw" : { + "name" : "c2", + "transport_address" : "127.0.0.1:9307", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address" : "127.0.0.1:9207", + "max_content_length_in_bytes" : 104857600 + } + }, + "_06S_kWoRqqFR8Z8CS3JRw" : { + "name" : "c1", + "transport_address" : "127.0.0.1:9306", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address" : "127.0.0.1:9206", + "max_content_length_in_bytes" : 104857600 + } + }, + "QZE5Bd6DQJmnfVs2dglOvA" : { + "name" : "d2", + "transport_address" : "127.0.0.1:9304", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "data", + 
"ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address" : "127.0.0.1:9204", + "max_content_length_in_bytes" : 104857600 + } + }, + "_3mTXg6dSweZn5ReB2fQqw" : { + "name" : "m1", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9200", + "127.0.0.1:9200" + ], + "publish_address" : "127.0.0.1:9200", + "max_content_length_in_bytes" : 104857600 + } + } + } +} diff --git a/client/sniffer/src/test/resources/readme.txt b/client/sniffer/src/test/resources/readme.txt new file mode 100644 index 0000000000000..d3a68f2d41da3 --- /dev/null +++ b/client/sniffer/src/test/resources/readme.txt @@ -0,0 +1,4 @@ +`*_node_http.json` contains files created by spining up toy clusters with a +few nodes in different configurations locally at various versions. They are +for testing `ElasticsearchNodesSniffer` against different versions of +Elasticsearch. diff --git a/docs/java-rest/low-level/sniffer.asciidoc b/docs/java-rest/low-level/sniffer.asciidoc index 4f846847615ea..1ffaa519cfb50 100644 --- a/docs/java-rest/low-level/sniffer.asciidoc +++ b/docs/java-rest/low-level/sniffer.asciidoc @@ -55,7 +55,7 @@ dependencies { Once a `RestClient` instance has been created as shown in <>, a `Sniffer` can be associated to it. The `Sniffer` will make use of the provided `RestClient` to periodically (every 5 minutes by default) fetch the list of current nodes from the cluster -and update them by calling `RestClient#setHosts`. +and update them by calling `RestClient#setNodes`. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -105,7 +105,7 @@ on failure is not enabled like explained above. 
The Elasticsearch Nodes Info api doesn't return the protocol to use when connecting to the nodes but only their `host:port` key-pair, hence `http` is used by default. In case `https` should be used instead, the -`ElasticsearchHostsSniffer` instance has to be manually created and provided +`ElasticsearchNodesSniffer` instance has to be manually created and provided as follows: ["source","java",subs="attributes,callouts,macros"] @@ -125,12 +125,12 @@ cluster, the ones that have responded until then. include-tagged::{doc-tests}/SnifferDocumentation.java[sniff-request-timeout] -------------------------------------------------- -Also, a custom `HostsSniffer` implementation can be provided for advanced -use-cases that may require fetching the hosts from external sources rather +Also, a custom `NodesSniffer` implementation can be provided for advanced +use-cases that may require fetching the `Node`s from external sources rather than from Elasticsearch: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/SnifferDocumentation.java[custom-hosts-sniffer] +include-tagged::{doc-tests}/SnifferDocumentation.java[custom-nodes-sniffer] -------------------------------------------------- <1> Fetch the hosts from the external source diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index c93873a5be429..c2259c7b55d14 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -197,6 +197,24 @@ header. The warnings must match exactly. Using it looks like this: id: 1 .... +If the arguments to `do` include `node_selector` then the request is only +sent to nodes that match the `node_selector`. Currently only the `version` +selector is supported and it has the same logic as the `version` field in +`skip`. 
It looks like this: + +.... +"test id": + - skip: + features: node_selector + - do: + node_selector: + version: " - 6.9.99" + index: + index: test-weird-index-中文 + type: weird.type + id: 1 + body: { foo: bar } +.... === `set` diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 193fcb30988c6..5f1bc524da599 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -21,6 +21,7 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks; dependencies { compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" + compile "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}" compile "org.elasticsearch:elasticsearch-nio:${version}" compile "org.elasticsearch:elasticsearch:${version}" compile "org.elasticsearch:elasticsearch-cli:${version}" diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java index dacd67ccadc32..f84fd25f166eb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java @@ -22,6 +22,8 @@ import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -45,22 +47,30 @@ public ClientYamlDocsTestClient(ClientYamlSuiteRestSpec restSpec, RestClient res super(restSpec, restClient, hosts, esVersion); } - public ClientYamlTestResponse callApi(String apiName, Map params, HttpEntity entity, Map headers) - throws IOException { + @Override + public ClientYamlTestResponse callApi(String apiName, Map params, HttpEntity entity, + Map headers, 
NodeSelector nodeSelector) throws IOException { if ("raw".equals(apiName)) { - // Raw requests are bit simpler.... + // Raw requests don't use the rest spec at all and are configured entirely by their parameters Map queryStringParams = new HashMap<>(params); String method = Objects.requireNonNull(queryStringParams.remove("method"), "Method must be set to use raw request"); String path = "/" + Objects.requireNonNull(queryStringParams.remove("path"), "Path must be set to use raw request"); - // And everything else is a url parameter! + Request request = new Request(method, path); + // All other parameters are url parameters + for (Map.Entry param : queryStringParams.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setEntity(entity); + request.setHeaders(buildHeaders(headers)); + request.setNodeSelector(nodeSelector); try { - Response response = restClient.performRequest(method, path, queryStringParams, entity); + Response response = restClient.performRequest(request); return new ClientYamlTestResponse(response); } catch (ResponseException e) { throw new ClientYamlTestResponseException(e); } } - return super.callApi(apiName, params, entity, headers); + return super.callApi(apiName, params, entity, headers, nodeSelector); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 16aa9c428d7e9..eca3aabd02045 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -28,6 +28,8 @@ import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import 
org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -75,8 +77,8 @@ public Version getEsVersion() { /** * Calls an api with the provided parameters and body */ - public ClientYamlTestResponse callApi(String apiName, Map params, HttpEntity entity, Map headers) - throws IOException { + public ClientYamlTestResponse callApi(String apiName, Map params, HttpEntity entity, + Map headers, NodeSelector nodeSelector) throws IOException { ClientYamlSuiteRestApi restApi = restApi(apiName); @@ -161,22 +163,34 @@ public ClientYamlTestResponse callApi(String apiName, Map params requestPath = finalPath.toString(); } - Header[] requestHeaders = new Header[headers.size()]; - int index = 0; - for (Map.Entry header : headers.entrySet()) { - logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); - requestHeaders[index++] = new BasicHeader(header.getKey(), header.getValue()); - } + logger.debug("calling api [{}]", apiName); + Request request = new Request(requestMethod, requestPath); + for (Map.Entry param : queryStringParams.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setEntity(entity); + request.setHeaders(buildHeaders(headers)); + request.setNodeSelector(nodeSelector); try { - Response response = restClient.performRequest(requestMethod, requestPath, queryStringParams, entity, requestHeaders); + Response response = restClient.performRequest(request); return new ClientYamlTestResponse(response); } catch(ResponseException e) { throw new ClientYamlTestResponseException(e); } } + protected static Header[] buildHeaders(Map headers) { + Header[] requestHeaders = new Header[headers.size()]; + int index = 0; + for (Map.Entry header : headers.entrySet()) { + logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); + requestHeaders[index++] = new BasicHeader(header.getKey(), header.getValue()); + } + return requestHeaders; + } + private static boolean 
sendBodyAsSourceParam(List supportedMethods, String contentType, long contentLength) { if (false == supportedMethods.contains(HttpGet.METHOD_NAME)) { // The API doesn't claim to support GET anyway diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index ca04c0c53d12a..633efc8a6e4c1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -25,6 +25,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -56,9 +58,12 @@ public class ClientYamlTestExecutionContext { private ClientYamlTestResponse response; private final boolean randomizeContentType; + private final CheckedRunnable setNodeMetadata; - ClientYamlTestExecutionContext(ClientYamlTestClient clientYamlTestClient, boolean randomizeContentType) { + ClientYamlTestExecutionContext(ClientYamlTestClient clientYamlTestClient, + CheckedRunnable setNodeMetadata, boolean randomizeContentType) { this.clientYamlTestClient = clientYamlTestClient; + this.setNodeMetadata = setNodeMetadata; this.randomizeContentType = randomizeContentType; } @@ -68,6 +73,15 @@ public class ClientYamlTestExecutionContext { */ public ClientYamlTestResponse callApi(String apiName, Map params, List> bodies, Map headers) throws IOException { + return callApi(apiName, params, bodies, headers, NodeSelector.ANY); + } + + /** + * Calls an elasticsearch api with the parameters and request body provided as arguments. 
+ * Saves the obtained response in the execution context. + */ + public ClientYamlTestResponse callApi(String apiName, Map params, List> bodies, + Map headers, NodeSelector nodeSelector) throws IOException { //makes a copy of the parameters before modifying them for this specific request Map requestParams = new HashMap<>(params); requestParams.putIfAbsent("error_trace", "true"); // By default ask for error traces, this my be overridden by params @@ -85,9 +99,13 @@ public ClientYamlTestResponse callApi(String apiName, Map params } } + if (nodeSelector != NodeSelector.ANY) { + setNodeMetadata.run(); + } + HttpEntity entity = createEntity(bodies, requestHeaders); try { - response = callApiInternal(apiName, requestParams, entity, requestHeaders); + response = callApiInternal(apiName, requestParams, entity, requestHeaders, nodeSelector); return response; } catch(ClientYamlTestResponseException e) { response = e.getRestTestResponse(); @@ -153,9 +171,9 @@ private BytesRef bodyAsBytesRef(Map bodyAsMap, XContentType xCon } // pkg-private for testing - ClientYamlTestResponse callApiInternal(String apiName, Map params, - HttpEntity entity, Map headers) throws IOException { - return clientYamlTestClient.callApi(apiName, params, entity, headers); + ClientYamlTestResponse callApiInternal(String apiName, Map params, HttpEntity entity, + Map headers, NodeSelector nodeSelector) throws IOException { + return clientYamlTestClient.callApi(apiName, params, entity, headers, nodeSelector); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 950bb14eed9af..98b72d43bf73a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -22,9 +22,11 @@ import 
com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.http.HttpHost; import org.elasticsearch.Version; +import org.elasticsearch.client.Node; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; @@ -47,8 +49,10 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; /** * Runs a suite of yaml tests shared with all the official Elasticsearch clients against against an elasticsearch cluster. @@ -121,8 +125,10 @@ public void initAndResetContext() throws Exception { } } ClientYamlTestClient clientYamlTestClient = initClientYamlTestClient(restSpec, restClient, hosts, esVersion); - restTestExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, randomizeContentType()); - adminExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, false); + restTestExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, + () -> sniffHostMetadata(client()), randomizeContentType()); + adminExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, + () -> sniffHostMetadata(adminClient()), false); String[] blacklist = resolvePathsProperty(REST_TESTS_BLACKLIST, null); blacklistPathMatchers = new ArrayList<>(); for (String entry : blacklist) { @@ -367,4 +373,76 @@ private String errorMessage(ExecutableSection executableSection, Throwable t) { protected boolean randomizeContentType() { return true; } + + /** + * Sniff the cluster for host metadata if it hasn't already been sniffed. This isn't the + * same thing as using the {@link Sniffer} because: + *

    + *
  • It doesn't replace the hosts that that {@link #client} communicates with + *
  • If there is already host metadata it skips running. This behavior isn't + * thread safe but it doesn't have to be for our tests. + *
+ */ + private void sniffHostMetadata(RestClient client) throws IOException { + List nodes = client.getNodes(); + boolean allHaveRoles = true; + for (Node node : nodes) { + if (node.getRoles() == null) { + allHaveRoles = false; + break; + } + } + if (allHaveRoles) { + // We already have resolved metadata. + return; + } + // No resolver, sniff one time and resolve metadata against the results + ElasticsearchNodesSniffer.Scheme scheme = + ElasticsearchNodesSniffer.Scheme.valueOf(getProtocol().toUpperCase(Locale.ROOT)); + /* + * We don't want to change the list of nodes that the client communicates with + * because that'd just be rude. So instead we replace the nodes find the nodes + * returned by the sniffer that correspond with the nodes already the client + * and set the nodes to them. That *shouldn't* change the nodes that the client + * communicates with. + */ + ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer( + adminClient(), ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); + attachSniffedMetadataOnClient(client, nodes, sniffer.sniff()); + } + + static void attachSniffedMetadataOnClient(RestClient client, List originalNodes, List nodesWithMetadata) { + Set originalHosts = originalNodes.stream() + .map(Node::getHost) + .collect(Collectors.toSet()); + List sniffed = new ArrayList<>(); + for (Node node : nodesWithMetadata) { + /* + * getHost is the publish_address of the node which, sometimes, is + * ipv6 and, sometimes, our original address for the node is ipv4. + * In that case the ipv4 address should be in getBoundHosts. If it + * isn't then we'll end up without the right number of hosts which + * will fail down below with a pretty error message. 
+ */ + if (originalHosts.contains(node.getHost())) { + sniffed.add(node); + } else { + for (HttpHost bound : node.getBoundHosts()) { + if (originalHosts.contains(bound)) { + sniffed.add(node.withHost(bound)); + break; + } + } + } + } + int missing = originalNodes.size() - sniffed.size(); + if (missing > 0) { + List hosts = originalNodes.stream() + .map(Node::getHost) + .collect(Collectors.toList()); + throw new IllegalStateException("Didn't sniff metadata for all nodes. Wanted metadata for " + + hosts + " but got " + sniffed); + } + client.setNodes(sniffed.toArray(new Node[0])); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index ab9be65514a96..31fa59857cfe2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -39,6 +39,7 @@ public final class Features { "catch_unauthorized", "embedded_stash_key", "headers", + "node_selector", "stash_in_key", "stash_in_path", "stash_path_replace", diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java deleted file mode 100644 index de63b46eff313..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Parses YAML test {@link org.elasticsearch.test.rest.yaml.section.ClientYamlTestSuite}s containing - * {@link org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection}s. - */ -package org.elasticsearch.test.rest.yaml.parser; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java index 4553845458541..de73fefaea776 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java @@ -24,6 +24,8 @@ import java.util.List; import java.util.Map; +import org.elasticsearch.client.NodeSelector; + import static java.util.Collections.unmodifiableMap; /** @@ -35,6 +37,7 @@ public class ApiCallSection { private final Map params = new HashMap<>(); private final Map headers = new HashMap<>(); private final List> bodies = new ArrayList<>(); + private NodeSelector nodeSelector = NodeSelector.ANY; public ApiCallSection(String api) { this.api = api; @@ -76,4 +79,18 @@ public void addBody(Map body) { public boolean hasBody() { return bodies.size() > 0; } + + /** + * Selects the node on which to run this request. + */ + public NodeSelector getNodeSelector() { + return nodeSelector; + } + + /** + * Set the selector that decides which node can run this request. 
+ */ + public void setNodeSelector(NodeSelector nodeSelector) { + this.nodeSelector = nodeSelector; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java index 321d22ed70aa7..1ec2382fac596 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -91,6 +92,12 @@ public void addExecutableSection(ExecutableSection executableSection) { + "runners that do not support the [warnings] section can skip the test at line [" + doSection.getLocation().lineNumber + "]"); } + if (NodeSelector.ANY != doSection.getApiCallSection().getNodeSelector() + && false == skipSection.getFeatures().contains("node_selector")) { + throw new IllegalArgumentException("Attempted to add a [do] with a [node_selector] section without a corresponding " + + "[skip] so runners that do not support the [node_selector] section can skip the test at line [" + + doSection.getLocation().lineNumber + "]"); + } } this.executableSections.add(executableSection); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 7c6647d65f044..210e88704eafd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -20,6 +20,9 @@ package 
org.elasticsearch.test.rest.yaml.section; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -84,6 +87,7 @@ public static DoSection parse(XContentParser parser) throws IOException { DoSection doSection = new DoSection(parser.getTokenLocation()); ApiCallSection apiCallSection = null; + NodeSelector nodeSelector = NodeSelector.ANY; Map headers = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); List expectedWarnings = new ArrayList<>(); @@ -120,6 +124,18 @@ public static DoSection parse(XContentParser parser) throws IOException { headers.put(headerName, parser.text()); } } + } else if ("node_selector".equals(currentFieldName)) { + String selectorName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + selectorName = parser.currentName(); + } else if (token.isValue()) { + NodeSelector newSelector = buildNodeSelector( + parser.getTokenLocation(), selectorName, parser.text()); + nodeSelector = nodeSelector == NodeSelector.ANY ? 
+ newSelector : new NodeSelector.Compose(nodeSelector, newSelector); + } + } } else if (currentFieldName != null) { // must be part of API call then apiCallSection = new ApiCallSection(currentFieldName); String paramName = null; @@ -152,6 +168,7 @@ public static DoSection parse(XContentParser parser) throws IOException { throw new IllegalArgumentException("client call section is mandatory within a do section"); } apiCallSection.addHeaders(headers); + apiCallSection.setNodeSelector(nodeSelector); doSection.setApiCallSection(apiCallSection); doSection.setExpectedWarningHeaders(unmodifiableList(expectedWarnings)); } finally { @@ -221,7 +238,7 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx try { ClientYamlTestResponse response = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), - apiCallSection.getBodies(), apiCallSection.getHeaders()); + apiCallSection.getBodies(), apiCallSection.getHeaders(), apiCallSection.getNodeSelector()); if (Strings.hasLength(catchParam)) { String catchStatusCode; if (catches.containsKey(catchParam)) { @@ -337,4 +354,35 @@ private String formatStatusCodeMessage(ClientYamlTestResponse restTestResponse, not(equalTo(408)), not(equalTo(409))))); } + + private static NodeSelector buildNodeSelector(XContentLocation location, String name, String value) { + switch (name) { + case "version": + Version[] range = SkipSection.parseVersionRange(value); + return new NodeSelector() { + @Override + public List select(List nodes) { + List result = new ArrayList<>(nodes.size()); + for (Node node : nodes) { + if (node.getVersion() == null) { + throw new IllegalStateException("expected [version] metadata to be set but got " + + node); + } + Version version = Version.fromString(node.getVersion()); + if (version.onOrAfter(range[0]) && version.onOrBefore(range[1])) { + result.add(node); + } + } + return result; + } + + @Override + public String toString() { + return "version between [" + range[0] + "] 
and [" + range[1] + "]"; + } + }; + default: + throw new IllegalArgumentException("unknown node_selector [" + name + "]"); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java index eb1fea4b79aed..e487f8e74da3b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java @@ -153,7 +153,7 @@ public boolean isEmpty() { return EMPTY.equals(this); } - private Version[] parseVersionRange(String versionRange) { + static Version[] parseVersionRange(String versionRange) { if (versionRange == null) { return new Version[] { null, null }; } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 2150baf59eab0..90d8c68775655 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -20,12 +20,17 @@ package org.elasticsearch.test.rest.yaml; import org.apache.http.HttpEntity; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.test.ESTestCase; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; + import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; public class ClientYamlTestExecutionContextTests extends ESTestCase { @@ -33,11 +38,10 @@ public class ClientYamlTestExecutionContextTests extends ESTestCase { public void 
testHeadersSupportStashedValueReplacement() throws IOException { final AtomicReference> headersRef = new AtomicReference<>(); final ClientYamlTestExecutionContext context = - new ClientYamlTestExecutionContext(null, randomBoolean()) { + new ClientYamlTestExecutionContext(null, () -> {}, randomBoolean()) { @Override ClientYamlTestResponse callApiInternal(String apiName, Map params, - HttpEntity entity, - Map headers) { + HttpEntity entity, Map headers, NodeSelector nodeSelector) { headersRef.set(headers); return null; } @@ -57,4 +61,20 @@ ClientYamlTestResponse callApiInternal(String apiName, Map param assertEquals("foo2", headersRef.get().get("foo")); assertEquals("baz bar1", headersRef.get().get("foo1")); } + + public void testNonDefaultNodeSelectorSetsNodeMetadata() throws IOException { + AtomicBoolean setHostMetadata = new AtomicBoolean(false); + final ClientYamlTestExecutionContext context = + new ClientYamlTestExecutionContext(null, () -> setHostMetadata.set(true), randomBoolean()) { + @Override + ClientYamlTestResponse callApiInternal(String apiName, Map params, + HttpEntity entity, Map headers, NodeSelector nodeSelector) { + return null; + } + }; + context.callApi(randomAlphaOfLength(2), emptyMap(), emptyList(), emptyMap(), NodeSelector.ANY); + assertFalse(setHostMetadata.get()); + context.callApi(randomAlphaOfLength(2), emptyMap(), emptyList(), emptyMap(), NodeSelector.NOT_MASTER_ONLY); + assertTrue(setHostMetadata.get()); + } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseTests.java index ae64dbc893d81..9ba003f7405bb 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseTests.java @@ -20,14 +20,25 @@ import java.nio.file.Files; import java.nio.file.Path; 
+import java.util.Arrays; +import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.RestClient; import org.elasticsearch.test.ESTestCase; +import static java.util.Collections.emptySet; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.greaterThan; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; public class ESClientYamlSuiteTestCaseTests extends ESTestCase { @@ -91,4 +102,56 @@ private static void assertSingleFile(Set files, String dirName, String fil assertThat(file.getFileName().toString(), equalTo(fileName)); assertThat(file.toAbsolutePath().getParent().getFileName().toString(), equalTo(dirName)); } + + public void testAttachSniffedMetadataOnClientOk() { + RestClient client = mock(RestClient.class); + List originalNodes = Arrays.asList( + new Node(new HttpHost("1")), + new Node(new HttpHost("2")), + new Node(new HttpHost("3"))); + List nodesWithMetadata = Arrays.asList(new Node[] { + // This node matches exactly: + new Node(new HttpHost("1"), emptySet(), randomAlphaOfLength(5), + randomAlphaOfLength(5), randomRoles()), + // This node also matches exactly but has bound hosts which don't matter: + new Node(new HttpHost("2"), + new HashSet<>(Arrays.asList(new HttpHost("2"), new HttpHost("not2"))), + randomAlphaOfLength(5), randomAlphaOfLength(5), randomRoles()), + // This node's host doesn't match but one of its published hosts does so + // we return a modified version of it: + new Node(new HttpHost("not3"), + new HashSet<>(Arrays.asList(new HttpHost("not3"), new HttpHost("3"))), + randomAlphaOfLength(5), randomAlphaOfLength(5), randomRoles()), + // This node isn't in the original list so it isn't added: + new Node(new 
HttpHost("4"), emptySet(), randomAlphaOfLength(5), + randomAlphaOfLength(5), randomRoles()), + }); + ESClientYamlSuiteTestCase.attachSniffedMetadataOnClient(client, originalNodes, nodesWithMetadata); + verify(client).setNodes(new Node[] { + nodesWithMetadata.get(0), + nodesWithMetadata.get(1), + nodesWithMetadata.get(2).withHost(new HttpHost("3")), + }); + } + + public void testAttachSniffedMetadataOnClientNotEnoughNodes() { + // Try a version of the call that should fail because it doesn't have all the results + RestClient client = mock(RestClient.class); + List originalNodes = Arrays.asList( + new Node(new HttpHost("1")), + new Node(new HttpHost("2"))); + List nodesWithMetadata = Arrays.asList(new Node[] { + // This node matches exactly: + new Node(new HttpHost("1"), emptySet(), "n", "v", new Node.Roles(true, true, true)), + }); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> + ESClientYamlSuiteTestCase.attachSniffedMetadataOnClient(client, originalNodes, nodesWithMetadata)); + assertEquals(e.getMessage(), "Didn't sniff metadata for all nodes. 
Wanted metadata for " + + "[http://1, http://2] but got [[host=http://1, bound=[], name=n, version=v, roles=mdi]]"); + verify(client, never()).setNodes(any(Node[].class)); + } + + private Node.Roles randomRoles() { + return new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean()); + } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index ecee131c7a28e..87f2d7f9a53f8 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.rest.yaml.section; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -35,11 +36,12 @@ import static org.hamcrest.Matchers.nullValue; public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentParserTestCase { - public void testAddingDoWithoutWarningWithoutSkip() { + public void testAddingDoWithoutSkips() { int lineNumber = between(1, 10000); ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); section.setSkipSection(SkipSection.EMPTY); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + doSection.setApiCallSection(new ApiCallSection("test")); section.addExecutableSection(doSection); } @@ -49,6 +51,7 @@ public void testAddingDoWithWarningWithSkip() { section.setSkipSection(new SkipSection(null, singletonList("warnings"), null)); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeaders(singletonList("foo")); + 
doSection.setApiCallSection(new ApiCallSection("test")); section.addExecutableSection(doSection); } @@ -58,11 +61,37 @@ public void testAddingDoWithWarningWithSkipButNotWarnings() { section.setSkipSection(new SkipSection(null, singletonList("yaml"), null)); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeaders(singletonList("foo")); + doSection.setApiCallSection(new ApiCallSection("test")); Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection)); assertEquals("Attempted to add a [do] with a [warnings] section without a corresponding [skip] so runners that do not support the" + " [warnings] section can skip the test at line [" + lineNumber + "]", e.getMessage()); } + public void testAddingDoWithNodeSelectorWithSkip() { + int lineNumber = between(1, 10000); + ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); + section.setSkipSection(new SkipSection(null, singletonList("node_selector"), null)); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + ApiCallSection apiCall = new ApiCallSection("test"); + apiCall.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + doSection.setApiCallSection(apiCall); + section.addExecutableSection(doSection); + } + + public void testAddingDoWithNodeSelectorWithSkipButNotWarnings() { + int lineNumber = between(1, 10000); + ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); + section.setSkipSection(new SkipSection(null, singletonList("yaml"), null)); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + ApiCallSection apiCall = new ApiCallSection("test"); + apiCall.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + doSection.setApiCallSection(apiCall); + Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection)); + assertEquals("Attempted to add a [do] with a 
[node_selector] section without a corresponding" + + " [skip] so runners that do not support the [node_selector] section can skip the test at" + + " line [" + lineNumber + "]", e.getMessage()); + } + public void testWrongIndentation() throws Exception { { XContentParser parser = createParser(YamlXContent.yamlXContent, diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index 982eac4b80274..6fb22ae7cce8c 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -19,11 +19,16 @@ package org.elasticsearch.test.rest.yaml.section; +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.yaml.YamlXContent; +import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import org.hamcrest.MatcherAssert; import java.io.IOException; @@ -31,11 +36,16 @@ import java.util.Map; import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class 
DoSectionTests extends AbstractClientYamlTestFragmentParserTestCase { @@ -496,7 +506,35 @@ public void testParseDoSectionExpectedWarnings() throws Exception { assertThat(doSection.getApiCallSection(), notNullValue()); assertThat(doSection.getExpectedWarningHeaders(), equalTo(singletonList( "just one entry this time"))); + } + + public void testNodeSelector() throws IOException { + parser = createParser(YamlXContent.yamlXContent, + "node_selector:\n" + + " version: 5.2.0-6.0.0\n" + + "indices.get_field_mapping:\n" + + " index: test_index" + ); + + DoSection doSection = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node v170 = nodeWithVersion("1.7.0"); + Node v521 = nodeWithVersion("5.2.1"); + Node v550 = nodeWithVersion("5.5.0"); + Node v612 = nodeWithVersion("6.1.2"); + assertEquals(Arrays.asList(v521, v550), doSection.getApiCallSection().getNodeSelector() + .select(Arrays.asList(v170, v521, v550, v612))); + ClientYamlTestExecutionContext context = mock(ClientYamlTestExecutionContext.class); + ClientYamlTestResponse mockResponse = mock(ClientYamlTestResponse.class); + when(context.callApi("indices.get_field_mapping", singletonMap("index", "test_index"), + emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector())).thenReturn(mockResponse); + doSection.execute(context); + verify(context).callApi("indices.get_field_mapping", singletonMap("index", "test_index"), + emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector()); + } + private Node nodeWithVersion(String version) { + return new Node(new HttpHost("dummy"), null, null, version, null); } private void assertJsonEquals(Map actual, String expected) throws IOException { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java index 
88b1a8a9db18f..e80cb7896766f 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java @@ -17,8 +17,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; -import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer; -import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer.Scheme; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -305,11 +304,12 @@ static Sniffer createSniffer(final Config config, final RestClient client, final if (sniffingEnabled) { final List hosts = HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings()); // createHosts(config) ensures that all schemes are the same for all hosts! - final Scheme scheme = hosts.get(0).startsWith("https") ? Scheme.HTTPS : Scheme.HTTP; - final ElasticsearchHostsSniffer hostsSniffer = - new ElasticsearchHostsSniffer(client, ElasticsearchHostsSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); + final ElasticsearchNodesSniffer.Scheme scheme = hosts.get(0).startsWith("https") ? 
+ ElasticsearchNodesSniffer.Scheme.HTTPS : ElasticsearchNodesSniffer.Scheme.HTTP; + final ElasticsearchNodesSniffer hostsSniffer = + new ElasticsearchNodesSniffer(client, ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); - sniffer = Sniffer.builder(client).setHostsSniffer(hostsSniffer).build(); + sniffer = Sniffer.builder(client).setNodesSniffer(hostsSniffer).build(); // inform the sniffer whenever there's a node failure listener.setSniffer(sniffer); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java index 6590232fda1ff..8982373f6bdf8 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java @@ -8,6 +8,7 @@ import org.apache.http.HttpHost; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.common.Nullable; @@ -76,7 +77,8 @@ public void setResource(@Nullable final HttpResource resource) { } @Override - public void onFailure(final HttpHost host) { + public void onFailure(final Node node) { + HttpHost host = node.getHost(); logger.warn("connection failed to node at [{}://{}:{}]", host.getSchemeName(), host.getHostName(), host.getPort()); final HttpResource resource = this.resource.get(); @@ -86,8 +88,8 @@ public void onFailure(final HttpHost host) { resource.markDirty(); } if (sniffer != null) { - sniffer.sniffOnFailure(host); + sniffer.sniffOnFailure(node); } } -} \ No newline at end of file +} diff --git 
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java index f1ecb799406e8..dd1cbb69b680e 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java @@ -7,6 +7,7 @@ import org.apache.http.HttpHost; import org.apache.lucene.util.SetOnce.AlreadySetException; +import org.elasticsearch.client.Node; import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.test.ESTestCase; @@ -21,7 +22,7 @@ public class NodeFailureListenerTests extends ESTestCase { private final Sniffer sniffer = mock(Sniffer.class); private final HttpResource resource = new MockHttpResource(getTestName(), false); - private final HttpHost host = new HttpHost("localhost", 9200); + private final Node node = new Node(new HttpHost("localhost", 9200)); private final NodeFailureListener listener = new NodeFailureListener(); @@ -44,15 +45,15 @@ public void testSetResourceTwiceFails() { public void testSnifferNotifiedOnFailure() { listener.setSniffer(sniffer); - listener.onFailure(host); + listener.onFailure(node); - verify(sniffer).sniffOnFailure(host); + verify(sniffer).sniffOnFailure(node); } public void testResourceNotifiedOnFailure() { listener.setResource(resource); - listener.onFailure(host); + listener.onFailure(node); assertTrue(resource.isDirty()); } @@ -64,14 +65,14 @@ public void testResourceAndSnifferNotifiedOnFailure() { listener.setResource(optionalResource); listener.setSniffer(optionalSniffer); - listener.onFailure(host); + listener.onFailure(node); if (optionalResource != null) { assertTrue(resource.isDirty()); } if (optionalSniffer != null) { - verify(sniffer).sniffOnFailure(host); + 
verify(sniffer).sniffOnFailure(node); } } From 81dd654f0356a060babb9b450e029f06d749a0d8 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 11 May 2018 09:49:54 -0400 Subject: [PATCH 02/23] Fix test --- .../xpack/monitoring/exporter/http/HttpExporterTests.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java index 52eed801b3273..2d42109c455fa 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java @@ -8,6 +8,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.sniff.Sniffer; @@ -26,6 +27,7 @@ import org.elasticsearch.xpack.monitoring.exporter.ClusterAlertsUtil; import org.elasticsearch.xpack.monitoring.exporter.Exporter.Config; import org.junit.Before; +import org.mockito.ArgumentCaptor; import org.mockito.InOrder; import java.io.IOException; @@ -300,7 +302,7 @@ public void testCreateSniffer() throws IOException { final StringEntity entity = new StringEntity("{}", ContentType.APPLICATION_JSON); when(response.getEntity()).thenReturn(entity); - when(client.performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class))).thenReturn(response); + when(client.performRequest(any(Request.class))).thenReturn(response); try (Sniffer sniffer = HttpExporter.createSniffer(config, client, listener)) { assertThat(sniffer, not(nullValue())); @@ -309,7 +311,7 @@ public void 
testCreateSniffer() throws IOException { } // it's a race whether it triggers this at all - verify(client, atMost(1)).performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class)); + verify(client, atMost(1)).performRequest(any(Request.class)); verifyNoMoreInteractions(client, listener); } From 73d710f7eb2a851602bd3e68d9bafbdaacf82865 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 11 May 2018 12:19:26 -0400 Subject: [PATCH 03/23] Updates --- .../java/org/elasticsearch/client/Node.java | 8 +-- .../elasticsearch/client/NodeSelector.java | 32 +++++++---- .../org/elasticsearch/client/RestClient.java | 14 ++--- .../elasticsearch/client/RestClientTests.java | 55 ------------------- 4 files changed, 29 insertions(+), 80 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java index b26a0fa603c99..231ff0e813fc2 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Node.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Node.java @@ -115,7 +115,7 @@ public Set getBoundHosts() { } /** - * @return the name + * The {@code node.name} of the node. */ public String getName() { return name; } @@ -189,19 +189,19 @@ public Roles(boolean masterEligible, boolean data, boolean ingest) { } /** - * The node could be elected master. + * Returns whether or not the node could be elected master. */ public boolean isMasterEligible() { return masterEligible; } /** - * The node stores data. + * Returns whether or not the node stores data. */ public boolean isData() { return data; } /** - * The node runs ingest pipelines. + * Returns whether or not the node runs ingest pipelines.
*/ public boolean isIngest() { return ingest; diff --git a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java index b9fb35d24168c..7ea538aec0ab7 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java +++ b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java @@ -30,19 +30,27 @@ */ public interface NodeSelector { /** - * Select the {@link Node}s to which to send requests. This may be called - * twice per request, once for "living" nodes that have not had been - * blacklisted by previous errors if there are any. If it returns an - * empty list when sent the living nodes or if there aren't any living - * nodes left then this will be called with a list of "dead" nodes that - * have been blacklisted by previous failures. In both cases it should - * return a list of nodes sorted by its preference for which node is used. - * If it is operating on "living" nodes that it returns function as - * fallbacks in case of request failures. If it is operating on dead nodes - * then the dead node that it returns is attempted but no others. + * Select the {@link Node}s to which to send requests. This is called with + * a list of {@linkplain Node}s in the order that the rest client would + * prefer to use them and it should remove nodes from the list that should + * not receive the request. + *

+ * This may be called twice per request: first for "living" nodes that + * have not been blacklisted by previous errors. In this case the order + * of the nodes is the order in which the client thinks that they should + * be tried next. If the selector removes all nodes from the list or if + * there aren't any living nodes then the client will call this method + * with a list of "dead" nodes. In this case the list is sorted "soonest + * to be revived" first. In this case the rest client will only attempt + * the first node. + *

+ * Implementations may reorder the list but they should + * be careful in doing so as the original order is important (see above). + * An implementation that sorts the list consistently will consistently send + * requests to a single node, overloading it. So implementations that + * reorder the list should take the original order into account + * somehow. + * - * @param nodes an unmodifiable list of {@linkplain Node}s in the order - * that the {@link RestClient} would prefer to use them * @return a subset of the provided list of {@linkplain Node}s that the * selector approves of, in the order that the selector would prefer * to use them. diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 80454ab245d0c..00232c93fb34b 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -142,14 +142,6 @@ public static RestClientBuilder builder(HttpHost... hosts) { return new RestClientBuilder(hostsToNodes(hosts)); } - /** - * Replaces the nodes that the client communicates without providing any - * metadata about any of the nodes. - */ - public void setHosts(HttpHost... hosts) { - setNodes(hostsToNodes(hosts)); - } - /** * Replaces the nodes that the client communicates with. Prefer this to * {@link #setHosts(HttpHost...)} if you have metadata about the hosts @@ -621,6 +613,9 @@ private NodeTuple> nextNode(NodeSelector nodeSelector) throws IOE return new NodeTuple<>(hosts.iterator(), nodeTuple.authCache); } + /** + * Select hosts to try. Package private for testing. + */ static List selectHosts(NodeTuple> nodeTuple, Map blacklist, AtomicInteger lastNodeIndex, long now, NodeSelector nodeSelector) throws IOException { @@ -646,12 +641,13 @@ static List selectHosts(NodeTuple> nodeTuple, if (false == livingNodes.isEmpty()) { /* * Normal state: there is at least one living node.
Rotate the - * list so subsequent requests to will prefer the nodes in a + * list so subsequent requests will prefer the nodes in a * different order then run them through the NodeSelector so it * can have its say in which nodes are ok and their ordering. If * the selector is ok with any over the living nodes then use * them for the request. */ + // TODO this is going to send more requests to nodes right *after* a node that the selector removes Collections.rotate(livingNodes, lastNodeIndex.getAndIncrement()); List selectedLivingNodes = nodeSelector.select(livingNodes); if (false == selectedLivingNodes.isEmpty()) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index a3b295ec7c627..7e1926159f85d 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -212,33 +212,6 @@ public void testBuildUriLeavesPathUntouched() { } } - public void testSetHostsWrongArguments() throws IOException { - try (RestClient restClient = createRestClient()) { - restClient.setHosts((HttpHost[]) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null or empty", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts(); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null or empty", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts((HttpHost) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("host cannot be null", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); - fail("setHosts should have 
failed"); - } catch (IllegalArgumentException e) { - assertEquals("host cannot be null", e.getMessage()); - } - } - /** * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testConstructor()}. */ @@ -395,34 +368,6 @@ public String toString() { } } - public void testSetHostsFailures() throws IOException { - RestClient restClient = createRestClient(); - try { - restClient.setHosts((HttpHost[]) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null or empty", e.getMessage()); - } - try { - restClient.setHosts(); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null or empty", e.getMessage()); - } - try { - restClient.setHosts((HttpHost) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("host cannot be null", e.getMessage()); - } - try { - restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("host cannot be null", e.getMessage()); - } - } - public void testSetNodesFailures() throws IOException { RestClient restClient = createRestClient(); try { From 7eb276b5a21a191cd2856dbfef97ec85ad739043 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 11 May 2018 13:02:36 -0400 Subject: [PATCH 04/23] Move Compose --- .../elasticsearch/client/NodeSelector.java | 27 ----------------- .../org/elasticsearch/client/RestClient.java | 12 +++++--- .../elasticsearch/client/sniff/Sniffer.java | 3 +- .../test/rest/yaml/section/DoSection.java | 29 ++++++++++++++++++- 4 files changed, 38 insertions(+), 33 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java index 7ea538aec0ab7..e9fbc9b6999d6 100644 --- 
a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java +++ b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.Objects; /** * Selects nodes that can receive requests. Used to keep requests away @@ -95,30 +94,4 @@ public String toString() { return "NOT_MASTER_ONLY"; } }; - - /** - * Selector that composes two selectors, running the "right" most selector - * first and then running the "left" selector on the results of the "right" - * selector. - */ - class Compose implements NodeSelector { - private final NodeSelector lhs; - private final NodeSelector rhs; - - public Compose(NodeSelector lhs, NodeSelector rhs) { - this.lhs = Objects.requireNonNull(lhs, "lhs is required"); - this.rhs = Objects.requireNonNull(rhs, "rhs is required"); - } - - @Override - public List select(List nodes) { - return lhs.select(rhs.select(nodes)); - } - - @Override - public String toString() { - // . as in haskell's "compose" operator - return lhs + "." + rhs; - } - } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 00232c93fb34b..1e6a477e215d6 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -53,6 +53,7 @@ import java.io.Closeable; import java.io.IOException; +import java.net.ConnectException; import java.net.SocketTimeoutException; import java.net.URI; import java.net.URISyntaxException; @@ -82,7 +83,7 @@ *

* Must be created using {@link RestClientBuilder}, which allows to set all the different options or just rely on defaults. * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later - * by calling {@link #setHosts(HttpHost...)}. + * by calling {@link #setNodes(Node...)}. *

* The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and @@ -143,9 +144,7 @@ public static RestClientBuilder builder(HttpHost... hosts) { } /** - * Replaces the nodes that the client communicates with. Prefer this to - * {@link #setHosts(HttpHost...)} if you have metadata about the hosts - * like their Elasticsearch version of which roles they implement. + * Replaces the nodes with which the client communicates. */ public synchronized void setNodes(Node... nodes) { if (nodes == null || nodes.length == 0) { @@ -932,6 +931,11 @@ Response get() throws IOException { e.initCause(exception); throw e; } + if (exception instanceof ConnectException) { + ConnectException e = new ConnectException(exception.getMessage()); + e.initCause(exception); + throw e; + } if (exception instanceof IOException) { throw new IOException(exception.getMessage(), exception); } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index bc2417cb8d8c8..c2366c797ddc7 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -154,7 +154,8 @@ synchronized void shutdown() { /** * Returns a new {@link SnifferBuilder} to help with {@link Sniffer} creation. 
* - * @param restClient the client that gets its hosts set (via {@link RestClient#setHosts(HttpHost...)}) once they are fetched + * @param restClient the client that gets its hosts set (via + * {@link RestClient#setNodes(Node...)}) once they are fetched * @return a new instance of {@link SnifferBuilder} */ public static SnifferBuilder builder(RestClient restClient) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 210e88704eafd..a63eb44d0d8fa 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -43,6 +43,7 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.TreeMap; import java.util.regex.Matcher; @@ -133,7 +134,7 @@ public static DoSection parse(XContentParser parser) throws IOException { NodeSelector newSelector = buildNodeSelector( parser.getTokenLocation(), selectorName, parser.text()); nodeSelector = nodeSelector == NodeSelector.ANY ? - newSelector : new NodeSelector.Compose(nodeSelector, newSelector); + newSelector : new ComposeNodeSelector(nodeSelector, newSelector); } } } else if (currentFieldName != null) { // must be part of API call then @@ -385,4 +386,30 @@ public String toString() { throw new IllegalArgumentException("unknown node_selector [" + name + "]"); } } + + /** + * Selector that composes two selectors, running the "right" most selector + * first and then running the "left" selector on the results of the "right" + * selector. 
+ */ + private static class ComposeNodeSelector implements NodeSelector { + private final NodeSelector lhs; + private final NodeSelector rhs; + + private ComposeNodeSelector(NodeSelector lhs, NodeSelector rhs) { + this.lhs = Objects.requireNonNull(lhs, "lhs is required"); + this.rhs = Objects.requireNonNull(rhs, "rhs is required"); + } + + @Override + public List select(List nodes) { + return lhs.select(rhs.select(nodes)); + } + + @Override + public String toString() { + // . as in haskell's "compose" operator + return lhs + "." + rhs; + } + } } From 0e6c47996452ebd89a6263eafdea83323a2e9102 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 1 Jun 2018 10:09:09 -0400 Subject: [PATCH 05/23] Fix javadoc --- .../java/org/elasticsearch/client/NodeSelector.java | 2 +- .../java/org/elasticsearch/client/RestClient.java | 11 ++++++++--- .../org/elasticsearch/client/RestClientTests.java | 4 ++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java index e9fbc9b6999d6..8e6b3eb7dc67d 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java +++ b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java @@ -25,7 +25,7 @@ /** * Selects nodes that can receive requests. Used to keep requests away * from master nodes or to send them to nodes with a particular attribute. - * Use with {@link Request#setNodeSelector(NodeSelector)}. + * Use with {@link RequestOptions.Builder#setNodeSelector(NodeSelector)}. 
*/ public interface NodeSelector { /** diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 671c5961b39b9..90abdd20fab85 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -65,6 +65,8 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -152,14 +154,17 @@ public synchronized void setNodes(Node... nodes) { } AuthCache authCache = new BasicAuthCache(); + Map nodeSet = new LinkedHashMap<>(); for (Node node : nodes) { if (node == null) { - throw new IllegalArgumentException("node cannot be null"); + throw new NullPointerException("node cannot be null"); } + // TODO should we throw an IAE if this happens? + nodeSet.put(node.getHost(), node); authCache.put(node.getHost(), new BasicScheme()); } - this.nodeTuple = new NodeTuple<>(Collections.unmodifiableList( - Arrays.asList(nodes)), authCache); + this.nodeTuple = new NodeTuple<>( + Collections.unmodifiableList(new ArrayList<>(nodeSet.values())), authCache); this.blacklist.clear(); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 3473954b6679e..0aaf1ca33b61d 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -237,13 +237,13 @@ public void testSetNodesWrongArguments() throws IOException { restClient.setNodes((Node[]) null); fail("setNodes should have failed"); } catch (IllegalArgumentException e) { - assertEquals("node must not be null nor empty", e.getMessage()); + assertEquals("nodes must not be null or empty", 
e.getMessage()); } try (RestClient restClient = createRestClient()) { restClient.setNodes(); fail("setNodes should have failed"); } catch (IllegalArgumentException e) { - assertEquals("node must not be null nor empty", e.getMessage()); + assertEquals("nodes must not be null or empty", e.getMessage()); } try (RestClient restClient = createRestClient()) { restClient.setNodes((Node) null); From e13171d9c5d14938c61bed7a0d6b7563f412c143 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 1 Jun 2018 14:42:28 -0400 Subject: [PATCH 06/23] Make NodeSelector work on mutable Iterable --- .../elasticsearch/client/NodeSelector.java | 43 ++++++++----------- .../org/elasticsearch/client/RestClient.java | 7 +-- .../client/NodeSelectorTests.java | 13 ++++-- .../RestClientMultipleHostsIntegTests.java | 10 ++--- .../client/RestClientMultipleHostsTests.java | 14 ++++-- .../elasticsearch/client/RestClientTests.java | 19 ++++---- .../test/rest/yaml/section/DoSection.java | 17 ++++---- 7 files changed, 67 insertions(+), 56 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java index 8e6b3eb7dc67d..c9c3f722e5538 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java +++ b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java @@ -19,8 +19,7 @@ package org.elasticsearch.client; -import java.util.ArrayList; -import java.util.List; +import java.util.Iterator; /** * Selects nodes that can receive requests. Used to keep requests away @@ -30,9 +29,10 @@ public interface NodeSelector { /** * Select the {@link Node}s to which to send requests. This is called with - * a list of {@linkplain Node}s in the order that the rest client would - * prefer to use them and it should remove nodes from the list that should - * not receive the request. 
+ * a mutable {@link Iterable} of {@linkplain Node}s in the order that the + * rest client would prefer to use them and implementers should remove + * nodes from the iterable that should not receive the request. Implementers may + * iterate the nodes as many times as they need. *

* This may be called twice per request: first for "living" nodes that * have not been blacklisted by previous errors. In this case the order @@ -42,27 +42,21 @@ public interface NodeSelector { * with a list of "dead" nodes. In this case the list is sorted "soonest * to be revived" first. In this case the rest client will only attempt * the first node. - *

- * Implementations may reorder the list but they should - * be careful in doing so as the original order is important (see above). - * An implementation that sorts list consistently will consistently send - * requests to s single node, overloading it. So implementations that - * reorder the list should take the original order into account - * somehow. - * - * @return a subset of the provided list of {@linkplain Node}s that the - * selector approves of, in the order that the selector would prefer - * to use them. */ - List select(List nodes); + void select(Iterable nodes); + /* + * We were fairly careful with our choice of Iterable here. The caller has + * a List but reordering the list is likely to break round robin. Luckilly + * Iterable doesn't allow any reordering. + */ /** * Selector that matches any node. */ NodeSelector ANY = new NodeSelector() { @Override - public List select(List nodes) { - return nodes; + public void select(Iterable nodes) { + // Intentionally does nothing } @Override @@ -78,15 +72,14 @@ public String toString() { */ NodeSelector NOT_MASTER_ONLY = new NodeSelector() { @Override - public List select(List nodes) { - List subset = new ArrayList<>(nodes.size()); - for (Node node : nodes) { + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + Node node = itr.next(); if (node.getRoles() == null) continue; - if (false == node.getRoles().isMasterEligible() || node.getRoles().isData()) { - subset.add(node); + if (node.getRoles().isMasterEligible() && false == node.getRoles().isData()) { + itr.remove(); } } - return subset; } @Override diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 90abdd20fab85..f7c5a3ae1f22e 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -650,8 +650,9 @@ static List 
selectHosts(NodeTuple> nodeTuple, * them for the request. */ // TODO this is going to send more requests to nodes right *after* a node that the selector removes - Collections.rotate(livingNodes, lastNodeIndex.getAndIncrement()); - List selectedLivingNodes = nodeSelector.select(livingNodes); + List selectedLivingNodes = new ArrayList<>(livingNodes); + Collections.rotate(selectedLivingNodes, lastNodeIndex.getAndIncrement()); + nodeSelector.select(selectedLivingNodes); if (false == selectedLivingNodes.isEmpty()) { return selectedLivingNodes; } @@ -680,7 +681,7 @@ public int compare(DeadNodeAndRevival lhs, DeadNodeAndRevival rhs) { for (DeadNodeAndRevival n : deadNodes) { selectedDeadNodes.add(n.node); } - selectedDeadNodes = nodeSelector.select(selectedDeadNodes); + nodeSelector.select(selectedDeadNodes); if (false == selectedDeadNodes.isEmpty()) { return singletonList(selectedDeadNodes.get(0)); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java index e8aa7a175be8b..12f5f5e2cc75e 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java @@ -36,7 +36,9 @@ public void testAny() { for (int i = 0; i < size; i++) { nodes.add(dummyNode(randomBoolean(), randomBoolean(), randomBoolean())); } - assertEquals(nodes, NodeSelector.ANY.select(nodes)); + List expected = new ArrayList<>(nodes); + NodeSelector.ANY.select(nodes); + assertEquals(expected, nodes); } public void testNotMasterOnly() { @@ -44,11 +46,16 @@ public void testNotMasterOnly() { Node masterAndData = dummyNode(true, true, randomBoolean()); Node coordinatingOnly = dummyNode(false, false, randomBoolean()); Node data = dummyNode(false, true, randomBoolean()); - List nodes = Arrays.asList(masterOnly, masterAndData, coordinatingOnly, data); + List nodes = new ArrayList<>(); + nodes.add(masterOnly); + 
nodes.add(masterAndData); + nodes.add(coordinatingOnly); + nodes.add(data); Collections.shuffle(nodes, getRandom()); List expected = new ArrayList<>(nodes); expected.remove(masterOnly); - assertEquals(expected, NodeSelector.NOT_MASTER_ONLY.select(nodes)); + NodeSelector.NOT_MASTER_ONLY.select(nodes); + assertEquals(expected, nodes); } private Node dummyNode(boolean master, boolean data, boolean ingest) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index 6f49f2f67acf3..acd57e40ae17a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -34,6 +34,7 @@ import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -249,13 +250,12 @@ Response getResponse() { private NodeSelector firstPositionNodeSelector() { return new NodeSelector() { @Override - public List select(List nodes) { - for (Node node : nodes) { - if (httpHosts[0] == node.getHost()) { - return singletonList(node); + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + if (httpHosts[0] != itr.next().getHost()) { + itr.remove(); } } - return Collections.emptyList(); } }; } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index b2ca08d22390d..f51d3740bf4bd 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -45,6 +45,7 @@ import 
java.net.SocketTimeoutException; import java.util.Collections; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.concurrent.ExecutorService; @@ -310,9 +311,16 @@ public void testRoundRobinRetryErrors() throws IOException { public void testNodeSelector() throws IOException { NodeSelector firstPositionOnly = new NodeSelector() { @Override - public List select(List restClientNodes) { - assertThat(restClientNodes, hasItem(nodes[0])); - return singletonList(nodes[0]); + public void select(Iterable restClientNodes) { + boolean found = false; + for (Iterator itr = restClientNodes.iterator(); itr.hasNext();) { + if (nodes[0] == itr.next()) { + found = true; + } else { + itr.remove(); + } + } + assertTrue(found); } }; int rounds = between(1, 10); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 0aaf1ca33b61d..98aa925e1bb93 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -26,10 +26,10 @@ import java.io.IOException; import java.net.URI; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -316,14 +316,12 @@ public void testSelectHosts() throws IOException { NodeSelector not1 = new NodeSelector() { @Override - public List select(List nodes) { - List result = new ArrayList<>(); - for (Node node : nodes) { - if (false == "1".equals(node.getVersion())) { - result.add(node); + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + if ("1".equals(itr.next().getVersion())) { + itr.remove(); } } - return result; } @Override @@ -333,8 +331,11 @@ public String toString() { }; 
NodeSelector noNodes = new NodeSelector() { @Override - public List select(List nodes) { - return Collections.emptyList(); + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + itr.next(); + itr.remove(); + } } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 87dadc5508246..8cfbf11bd64b7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -40,6 +40,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -373,19 +374,18 @@ private static NodeSelector buildNodeSelector(XContentLocation location, String Version[] range = SkipSection.parseVersionRange(value); return new NodeSelector() { @Override - public List select(List nodes) { - List result = new ArrayList<>(nodes.size()); - for (Node node : nodes) { + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + Node node = itr.next(); if (node.getVersion() == null) { throw new IllegalStateException("expected [version] metadata to be set but got " + node); } Version version = Version.fromString(node.getVersion()); - if (version.onOrAfter(range[0]) && version.onOrBefore(range[1])) { - result.add(node); + if (false == (version.onOrAfter(range[0]) && version.onOrBefore(range[1]))) { + itr.remove(); } } - return result; } @Override @@ -413,8 +413,9 @@ private ComposeNodeSelector(NodeSelector lhs, NodeSelector rhs) { } @Override - public List select(List nodes) { - return lhs.select(rhs.select(nodes)); + public void select(Iterable nodes) { + rhs.select(nodes); + lhs.select(nodes); } @Override From 
7339b2622bc66f79a089617ae1477a57a8704c8c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 1 Jun 2018 14:57:15 -0400 Subject: [PATCH 07/23] Some cleanup Still a few comments left to get through. --- .../org/elasticsearch/client/RestClient.java | 16 ++++++++-------- .../rest/yaml/ESClientYamlSuiteTestCase.java | 5 +---- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index f7c5a3ae1f22e..e210185802d6f 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -126,7 +126,7 @@ public class RestClient implements Closeable { * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation. * Creates a new builder instance and sets the hosts that the client will send requests to. *

- * Prefer this to {@link #builder(Node...)} if you have metadata up front about the nodes. + * Prefer this to {@link #builder(HttpHost...)} if you have metadata up front about the nodes. * If you don't either one is fine. */ public static RestClientBuilder builder(Node... nodes) { @@ -670,12 +670,7 @@ static List selectHosts(NodeTuple> nodeTuple, * node. */ if (false == deadNodes.isEmpty()) { - Collections.sort(deadNodes, new Comparator() { - @Override - public int compare(DeadNodeAndRevival lhs, DeadNodeAndRevival rhs) { - return Long.compare(rhs.nanosUntilRevival, lhs.nanosUntilRevival); - } - }); + Collections.sort(deadNodes); List selectedDeadNodes = new ArrayList<>(deadNodes.size()); for (DeadNodeAndRevival n : deadNodes) { @@ -985,7 +980,7 @@ static class NodeTuple { * Contains a reference to a blacklisted node and the time until it is * revived. We use this so we can do a single pass over the blacklist. */ - private static class DeadNodeAndRevival { + private static class DeadNodeAndRevival implements Comparable { final Node node; final long nanosUntilRevival; @@ -998,6 +993,11 @@ private static class DeadNodeAndRevival { public String toString() { return node.toString(); } + + @Override + public int compareTo(DeadNodeAndRevival lhs) { + return Long.compare(nanosUntilRevival, lhs.nanosUntilRevival); + } } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index a45049cc842b5..ab692cbaaee9c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -21,19 +21,16 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.http.HttpHost; -import org.apache.http.entity.StringEntity; import org.elasticsearch.Version; import 
org.elasticsearch.client.Node; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; @@ -382,7 +379,7 @@ protected boolean randomizeContentType() { return true; } - /** + /** * Sniff the cluster for host metadata if it hasn't already been sniffed. This isn't the * same thing as using the {@link Sniffer} because: *

    From 2ae7c27f20d23ce0e006a9fd8520e937325cb485 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 1 Jun 2018 15:57:56 -0400 Subject: [PATCH 08/23] Backwards --- .../src/main/java/org/elasticsearch/client/RestClient.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index e210185802d6f..3de1d6ee169a2 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -995,8 +995,8 @@ public String toString() { } @Override - public int compareTo(DeadNodeAndRevival lhs) { - return Long.compare(nanosUntilRevival, lhs.nanosUntilRevival); + public int compareTo(DeadNodeAndRevival rhs) { + return Long.compare(rhs.nanosUntilRevival, nanosUntilRevival); } } From d551dcbd238d5806ce30aeae9179f931afb2cd23 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 1 Jun 2018 18:57:56 -0400 Subject: [PATCH 09/23] Drop equals from Node --- .../java/org/elasticsearch/client/Node.java | 18 ------- .../org/elasticsearch/client/NodeTests.java | 25 ---------- .../ElasticsearchNodesSnifferParseTests.java | 47 +++++++++++-------- .../sniff/ElasticsearchNodesSnifferTests.java | 3 +- .../client/sniff/SnifferTests.java | 28 +++++++++-- .../rest/yaml/ESClientYamlSuiteTestCase.java | 4 +- 6 files changed, 55 insertions(+), 70 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java index 231ff0e813fc2..0f7050f13667e 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Node.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Node.java @@ -156,24 +156,6 @@ public String toString() { return b.append(']').toString(); } - @Override - public boolean equals(Object obj) { - if (obj == null || obj.getClass() != getClass()) { - return false; - } 
- Node other = (Node) obj; - return host.equals(other.host) - && Objects.equals(boundHosts, other.boundHosts) - && Objects.equals(version, other.version) - && Objects.equals(name, other.name) - && Objects.equals(roles, other.roles); - } - - @Override - public int hashCode() { - return Objects.hash(host, boundHosts, name, version, roles); - } - /** * Role information about an Elasticsearch process. */ diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java index 989861df50293..72f22eabb5ae8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java @@ -25,10 +25,7 @@ import java.util.Arrays; import java.util.HashSet; -import static java.util.Collections.singleton; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; public class NodeTests extends RestClientTestCase { public void testWithHost() { @@ -64,26 +61,4 @@ public void testToString() { "nam", "ver", new Roles(true, false, false)).toString()); } - - public void testEqualsAndHashCode() { - HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5)); - Node node = new Node(host, - randomBoolean() ? null : singleton(host), - randomBoolean() ? null : randomAsciiAlphanumOfLength(5), - randomBoolean() ? null : randomAsciiAlphanumOfLength(5), - randomBoolean() ? 
null : new Roles(true, true, true)); - assertFalse(node.equals(null)); - assertTrue(node.equals(node)); - assertEquals(node.hashCode(), node.hashCode()); - Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles()); - assertTrue(node.equals(copy)); - assertEquals(node.hashCode(), copy.hashCode()); - assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(), - node.getName(), node.getVersion(), node.getRoles()))); - assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))), - node.getName(), node.getVersion(), node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false)))); - } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java index 712a836a17b8a..d076ef6bcd91e 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java @@ -30,12 +30,16 @@ import java.io.IOException; import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Set; import com.fasterxml.jackson.core.JsonFactory; +import static org.elasticsearch.client.sniff.SnifferTests.assertNodesEquals; import static org.hamcrest.Matchers.hasItem; import static 
org.hamcrest.Matchers.hasSize; import static org.junit.Assert.assertThat; @@ -53,11 +57,14 @@ private void checkFile(String file, Node... expected) throws IOException { try { HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON); List nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory()); - // Use these assertions because the error messages are nicer than hasItems. - assertThat(nodes, hasSize(expected.length)); - for (Node expectedNode : expected) { - assertThat(nodes, hasItem(expectedNode)); - } + // Sort the list so the error messages are easier to read. + Collections.sort(nodes, new Comparator() { + @Override + public int compare(Node lhs, Node rhs) { + return lhs.getName().compareTo(rhs.getName()); + } + }); + assertNodesEquals(Arrays.asList(expected), nodes); } finally { in.close(); } @@ -65,38 +72,38 @@ private void checkFile(String file, Node... expected) throws IOException { public void test2x() throws IOException { checkFile("2.0.0_nodes_http.json", - node(9200, "m1", "2.0.0", true, false, false), - node(9202, "m2", "2.0.0", true, true, false), - node(9201, "m3", "2.0.0", true, false, false), + node(9207, "c1", "2.0.0", false, false, false), + node(9206, "c2", "2.0.0", false, false, false), node(9205, "d1", "2.0.0", false, true, false), node(9204, "d2", "2.0.0", false, true, false), node(9203, "d3", "2.0.0", false, true, false), - node(9207, "c1", "2.0.0", false, false, false), - node(9206, "c2", "2.0.0", false, false, false)); + node(9200, "m1", "2.0.0", true, false, false), + node(9202, "m2", "2.0.0", true, true, false), + node(9201, "m3", "2.0.0", true, false, false)); } public void test5x() throws IOException { checkFile("5.0.0_nodes_http.json", - node(9200, "m1", "5.0.0", true, false, true), - node(9201, "m2", "5.0.0", true, true, true), - node(9202, "m3", "5.0.0", true, false, true), + node(9206, "c1", "5.0.0", false, false, true), + node(9207, "c2", "5.0.0", false, false, true), node(9203, "d1",
"5.0.0", false, true, true), node(9204, "d2", "5.0.0", false, true, true), node(9205, "d3", "5.0.0", false, true, true), - node(9206, "c1", "5.0.0", false, false, true), - node(9207, "c2", "5.0.0", false, false, true)); + node(9200, "m1", "5.0.0", true, false, true), + node(9201, "m2", "5.0.0", true, true, true), + node(9202, "m3", "5.0.0", true, false, true)); } public void test6x() throws IOException { checkFile("6.0.0_nodes_http.json", - node(9200, "m1", "6.0.0", true, false, true), - node(9201, "m2", "6.0.0", true, true, true), - node(9202, "m3", "6.0.0", true, false, true), + node(9206, "c1", "6.0.0", false, false, true), + node(9207, "c2", "6.0.0", false, false, true), node(9203, "d1", "6.0.0", false, true, true), node(9204, "d2", "6.0.0", false, true, true), node(9205, "d3", "6.0.0", false, true, true), - node(9206, "c1", "6.0.0", false, false, true), - node(9207, "c2", "6.0.0", false, false, true)); + node(9200, "m1", "6.0.0", true, false, true), + node(9201, "m2", "6.0.0", true, true, true), + node(9202, "m3", "6.0.0", true, false, true)); } private Node node(int port, String name, String version, boolean master, boolean data, boolean ingest) { diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java index d5492babefb7c..e8b583dedcaa1 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java @@ -54,6 +54,7 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.client.sniff.SnifferTests.assertNodesEquals; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.startsWith; @@ -120,7 +121,7 @@ public void testSniffNodes() throws IOException { if 
(sniffResponse.isFailure) { fail("sniffNodes should have failed"); } - assertEquals(sniffResponse.result, sniffedNodes); + assertNodesEquals(sniffResponse.result, sniffedNodes); } catch(ResponseException e) { Response response = e.getResponse(); if (sniffResponse.isFailure) { diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index d6b19946f2880..17fe558698150 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -32,6 +32,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.concurrent.CancellationException; @@ -56,7 +57,7 @@ import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; @@ -111,11 +112,11 @@ public void shutdown() { fail("should have failed given that nodesSniffer says it threw an exception"); } else if (nodesSniffer.emptyList.get() > emptyList) { emptyList++; - assertEquals(lastNodes, restClient.getNodes()); + assertNodesEquals(lastNodes, restClient.getNodes()); } else { - assertNotEquals(lastNodes, restClient.getNodes()); + assertNotSame(lastNodes, restClient.getNodes()); List expectedNodes = CountingNodesSniffer.buildNodes(runs); - assertEquals(expectedNodes, restClient.getNodes()); + assertNodesEquals(expectedNodes, restClient.getNodes()); lastNodes = restClient.getNodes(); } } catch(IOException e) { @@ -653,4 +654,23 @@ public void testDefaultSchedulerShutdown() throws Exception { 
verify(executor, times(2)).awaitTermination(1000, TimeUnit.MILLISECONDS); verifyNoMoreInteractions(executor); } + + static final void assertNodesEquals(List expected, List actual) { + try { + assertEquals(expected.size(), actual.size()); + Iterator expectedItr = expected.iterator(); + Iterator actualItr = actual.iterator(); + while (expectedItr.hasNext()) { + Node expectedNode = expectedItr.next(); + Node actualNode = actualItr.next(); + assertEquals(expectedNode.getHost(), actualNode.getHost()); + assertEquals(expectedNode.getBoundHosts(), actualNode.getBoundHosts()); + assertEquals(expectedNode.getName(), actualNode.getName()); + assertEquals(expectedNode.getVersion(), actualNode.getVersion()); + assertEquals(expectedNode.getRoles(), actualNode.getRoles()); + } + } catch (AssertionError e) { + throw new AssertionError("nodes differ expected: " + expected + " but was: " + actual, e); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index ab692cbaaee9c..1939d27d040b0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -406,8 +406,8 @@ private void sniffHostMetadata(RestClient client) throws IOException { ElasticsearchNodesSniffer.Scheme.valueOf(getProtocol().toUpperCase(Locale.ROOT)); /* * We don't want to change the list of nodes that the client communicates with - * because that'd just be rude. So instead we replace the nodes find the nodes - * returned by the sniffer that correspond with the nodes already the client + * because that'd just be rude. So instead we find the nodes + * returned by the sniffer that correspond with the nodes already in the client * and set the nodes to them. 
That *shouldn't* change the nodes that the client * communicates with. */ From 5e0b20b96c6d2e7ea59fa740c215b415ebe33549 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 4 Jun 2018 10:27:42 -0400 Subject: [PATCH 10/23] Restore test --- .../elasticsearch/client/DeadHostState.java | 8 +++++++ .../org/elasticsearch/client/RestClient.java | 2 +- .../client/DeadHostStateTests.java | 23 +++++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java index 1d4e638e068f4..53723faccc38d 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -60,6 +60,14 @@ final class DeadHostState implements Comparable { this.failedAttempts = previousDeadHostState.failedAttempts + 1; } + /** + * The number of nanoseconds since this host was due to be revived. + * Positive values mean that we can revive the host now. + */ + long nanosUntilRevival(long nowInNanos) { + return nowInNanos - deadUntilNanos; + } + /** * Returns the timestamp (nanos) till the host is supposed to stay dead without being retried. * After that the host should be retried.
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 3de1d6ee169a2..cfa35844967ca 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -632,7 +632,7 @@ static List selectHosts(NodeTuple> nodeTuple, livingNodes.add(node); continue; } - long nanosUntilRevival = now - deadness.getDeadUntilNanos(); + long nanosUntilRevival = deadness.nanosUntilRevival(now); if (nanosUntilRevival > 0) { livingNodes.add(node); continue; diff --git a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java index ac8b9e748de19..958a62401d458 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java @@ -26,6 +26,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; public class DeadHostStateTests extends RestClientTestCase { @@ -65,6 +66,28 @@ public void testCompareToDefaultTimeSupplier() { } } + public void testNanosUntilRevival() { + DeadHostState deadHostState = null; + for (int i = 0; i < EXPECTED_TIMEOUTS_SECONDS.length; i++) { + long expectedTimeoutSecond = EXPECTED_TIMEOUTS_SECONDS[i]; + long now = 0; + if (i == 0) { + deadHostState = new DeadHostState(0); + } else { + deadHostState = new DeadHostState(deadHostState, 0); + } + for (int j = 0; j < expectedTimeoutSecond; j++) { + now += TimeUnit.SECONDS.toNanos(1); + assertThat(deadHostState.nanosUntilRevival(now), lessThanOrEqualTo(0L)); + } + int iters = randomIntBetween(5, 30); + for (int j = 0; j < iters; j++) { + now += TimeUnit.SECONDS.toNanos(1); + 
assertThat(deadHostState.nanosUntilRevival(now), greaterThan(0L)); + } + } + } + public void testDeadHostStateTimeouts() { DeadHostState previous = new DeadHostState(0); for (long expectedTimeoutsSecond : EXPECTED_TIMEOUTS_SECONDS) { From 9504841b2382c70dabfa8e3d3213dc735ad78e8c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 4 Jun 2018 15:29:52 -0400 Subject: [PATCH 11/23] Sniff on yaml test start --- .../org/elasticsearch/client/RestClient.java | 14 +++- .../elasticsearch/client/sniff/Sniffer.java | 2 +- .../client/sniff/SnifferTests.java | 8 +-- .../test/rest/yaml/ClientYamlTestClient.java | 2 - .../yaml/ClientYamlTestExecutionContext.java | 10 +-- .../rest/yaml/ESClientYamlSuiteTestCase.java | 71 +++---------------- .../ClientYamlTestExecutionContextTests.java | 22 +----- .../yaml/ESClientYamlSuiteTestCaseTests.java | 63 ---------------- .../rest/yaml/section/DoSectionTests.java | 11 ++- 9 files changed, 38 insertions(+), 165 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index cfa35844967ca..3bbf36f098964 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -148,8 +148,18 @@ public static RestClientBuilder builder(HttpHost... hosts) { /** * Replaces the nodes with which the client communicates. */ - public synchronized void setNodes(Node... nodes) { - if (nodes == null || nodes.length == 0) { + public void setNodes(Node... nodes) { + if (nodes == null) { + throw new IllegalArgumentException("nodes must not be null or empty"); + } + setNodes(Arrays.asList(nodes)); + } + + /** + * Replaces the nodes with which the client communicates. 
+ */ + public synchronized void setNodes(Collection nodes) { + if (nodes == null || nodes.isEmpty()) { throw new IllegalArgumentException("nodes must not be null or empty"); } AuthCache authCache = new BasicAuthCache(); diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index c48c094fad7ec..51f5df25f9ceb 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -213,7 +213,7 @@ final void sniff() throws IOException { if (sniffedNodes.isEmpty()) { logger.warn("no nodes to set, nodes will be updated at the next sniffing round"); } else { - restClient.setNodes(sniffedNodes.toArray(new Node[sniffedNodes.size()])); + restClient.setNodes(sniffedNodes); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index 17fe558698150..1d4542c99050a 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.client.RestClientTestCase; import org.elasticsearch.client.sniff.Sniffer.DefaultScheduler; import org.elasticsearch.client.sniff.Sniffer.Scheduler; -import org.mockito.Matchers; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -64,6 +63,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyCollectionOf; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -205,7 +205,7 @@ public void shutdown() { int totalRuns = nodesSniffer.runs.get(); assertEquals(iters, totalRuns); 
int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); - verify(restClient, times(setNodesRuns)).setNodes(Matchers.anyVararg()); + verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class)); verifyNoMoreInteractions(restClient); } @@ -272,7 +272,7 @@ public void shutdown() { int totalRuns = nodesSniffer.runs.get(); assertEquals(0, totalRuns); int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); - verify(restClient, times(setNodesRuns)).setNodes(Matchers.anyVararg()); + verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class)); verifyNoMoreInteractions(restClient); } @@ -417,7 +417,7 @@ public void run() { assertEquals(completedTasks, nodesSniffer.runs.get()); int setNodesRuns = nodesSniffer.runs.get() - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); - verify(restClient, times(setNodesRuns)).setNodes(Matchers.anyVararg()); + verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class)); verifyNoMoreInteractions(restClient); } finally { executor.shutdown(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index ba26dc5405672..7fcb61fc9a5b8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -19,12 +19,10 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.Logger; import 
org.elasticsearch.Version; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 850441922f069..4061b627cd816 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -26,7 +26,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; -import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -58,12 +57,9 @@ public class ClientYamlTestExecutionContext { private ClientYamlTestResponse response; private final boolean randomizeContentType; - private final CheckedRunnable setNodeMetadata; - ClientYamlTestExecutionContext(ClientYamlTestClient clientYamlTestClient, - CheckedRunnable setNodeMetadata, boolean randomizeContentType) { + ClientYamlTestExecutionContext(ClientYamlTestClient clientYamlTestClient, boolean randomizeContentType) { this.clientYamlTestClient = clientYamlTestClient; - this.setNodeMetadata = setNodeMetadata; this.randomizeContentType = randomizeContentType; } @@ -99,10 +95,6 @@ public ClientYamlTestResponse callApi(String apiName, Map params } } - if (nodeSelector != NodeSelector.ANY) { - setNodeMetadata.run(); - } - HttpEntity entity = createEntity(bodies, requestHeaders); try { response = callApiInternal(apiName, requestParams, entity, requestHeaders, nodeSelector); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 1939d27d040b0..bbe43cd80f18b 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -109,6 +109,11 @@ public static void initializeUseDefaultNumberOfShards() { @Before public void initAndResetContext() throws Exception { if (restTestExecutionContext == null) { + // Sniff host metadata in case we need it in the yaml tests + List nodesWithMetadata = sniffHostMetadata(adminClient()); + client().setNodes(nodesWithMetadata); + adminClient().setNodes(nodesWithMetadata); + assert adminExecutionContext == null; assert blacklistPathMatchers == null; final ClientYamlSuiteRestSpec restSpec = ClientYamlSuiteRestSpec.load(SPEC_PATH); @@ -119,10 +124,8 @@ public void initAndResetContext() throws Exception { final Version masterVersion = versionVersionTuple.v2(); logger.info("initializing client, minimum es version [{}], master version, [{}], hosts {}", esVersion, masterVersion, hosts); final ClientYamlTestClient clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts, esVersion, masterVersion); - restTestExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, - () -> sniffHostMetadata(client()), randomizeContentType()); - adminExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, - () -> sniffHostMetadata(adminClient()), false); + restTestExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, randomizeContentType()); + adminExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, false); final String[] blacklist = resolvePathsProperty(REST_TESTS_BLACKLIST, null); blacklistPathMatchers = new ArrayList<>(); for (final String entry : blacklist) { @@ -380,28 +383,9 @@ protected boolean randomizeContentType() { } /** - * Sniff the cluster for host metadata if it hasn't already been sniffed. This isn't the - * same thing as using the {@link Sniffer} because: - *
      - *
    • It doesn't replace the hosts that that {@link #client} communicates with - *
    • If there is already host metadata it skips running. This behavior isn't - * thread safe but it doesn't have to be for our tests. - *
    + * Sniff the cluster for host metadata. */ - private void sniffHostMetadata(RestClient client) throws IOException { - List nodes = client.getNodes(); - boolean allHaveRoles = true; - for (Node node : nodes) { - if (node.getRoles() == null) { - allHaveRoles = false; - break; - } - } - if (allHaveRoles) { - // We already have resolved metadata. - return; - } - // No resolver, sniff one time and resolve metadata against the results + private List sniffHostMetadata(RestClient client) throws IOException { ElasticsearchNodesSniffer.Scheme scheme = ElasticsearchNodesSniffer.Scheme.valueOf(getProtocol().toUpperCase(Locale.ROOT)); /* @@ -413,41 +397,6 @@ private void sniffHostMetadata(RestClient client) throws IOException { */ ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer( adminClient(), ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); - attachSniffedMetadataOnClient(client, nodes, sniffer.sniff()); - } - - static void attachSniffedMetadataOnClient(RestClient client, List originalNodes, List nodesWithMetadata) { - Set originalHosts = originalNodes.stream() - .map(Node::getHost) - .collect(Collectors.toSet()); - List sniffed = new ArrayList<>(); - for (Node node : nodesWithMetadata) { - /* - * getHost is the publish_address of the node which, sometimes, is - * ipv6 and, sometimes, our original address for the node is ipv4. - * In that case the ipv4 address should be in getBoundHosts. If it - * isn't then we'll end up without the right number of hosts which - * will fail down below with a pretty error message. 
- */ - if (originalHosts.contains(node.getHost())) { - sniffed.add(node); - } else { - for (HttpHost bound : node.getBoundHosts()) { - if (originalHosts.contains(bound)) { - sniffed.add(node.withHost(bound)); - break; - } - } - } - } - int missing = originalNodes.size() - sniffed.size(); - if (missing > 0) { - List hosts = originalNodes.stream() - .map(Node::getHost) - .collect(Collectors.toList()); - throw new IllegalStateException("Didn't sniff metadata for all nodes. Wanted metadata for " - + hosts + " but got " + sniffed); - } - client.setNodes(sniffed.toArray(new Node[0])); + return sniffer.sniff(); } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 90d8c68775655..fbf7f10e5e186 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -23,14 +23,10 @@ import org.elasticsearch.client.NodeSelector; import org.elasticsearch.test.ESTestCase; -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; - import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; public class ClientYamlTestExecutionContextTests extends ESTestCase { @@ -38,7 +34,7 @@ public class ClientYamlTestExecutionContextTests extends ESTestCase { public void testHeadersSupportStashedValueReplacement() throws IOException { final AtomicReference> headersRef = new AtomicReference<>(); final ClientYamlTestExecutionContext context = - new ClientYamlTestExecutionContext(null, () -> {}, randomBoolean()) { + new ClientYamlTestExecutionContext(null, randomBoolean()) { @Override 
ClientYamlTestResponse callApiInternal(String apiName, Map params, HttpEntity entity, Map headers, NodeSelector nodeSelector) { @@ -61,20 +57,4 @@ ClientYamlTestResponse callApiInternal(String apiName, Map param assertEquals("foo2", headersRef.get().get("foo")); assertEquals("baz bar1", headersRef.get().get("foo1")); } - - public void testNonDefaultNodeSelectorSetsNodeMetadata() throws IOException { - AtomicBoolean setHostMetadata = new AtomicBoolean(false); - final ClientYamlTestExecutionContext context = - new ClientYamlTestExecutionContext(null, () -> setHostMetadata.set(true), randomBoolean()) { - @Override - ClientYamlTestResponse callApiInternal(String apiName, Map params, - HttpEntity entity, Map headers, NodeSelector nodeSelector) { - return null; - } - }; - context.callApi(randomAlphaOfLength(2), emptyMap(), emptyList(), emptyMap(), NodeSelector.ANY); - assertFalse(setHostMetadata.get()); - context.callApi(randomAlphaOfLength(2), emptyMap(), emptyList(), emptyMap(), NodeSelector.NOT_MASTER_ONLY); - assertTrue(setHostMetadata.get()); - } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseTests.java index 9ba003f7405bb..ae64dbc893d81 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCaseTests.java @@ -20,25 +20,14 @@ import java.nio.file.Files; import java.nio.file.Path; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.http.HttpHost; -import org.elasticsearch.client.Node; -import org.elasticsearch.client.RestClient; import org.elasticsearch.test.ESTestCase; -import static java.util.Collections.emptySet; import static org.hamcrest.CoreMatchers.equalTo; import static 
org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.greaterThan; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; public class ESClientYamlSuiteTestCaseTests extends ESTestCase { @@ -102,56 +91,4 @@ private static void assertSingleFile(Set files, String dirName, String fil assertThat(file.getFileName().toString(), equalTo(fileName)); assertThat(file.toAbsolutePath().getParent().getFileName().toString(), equalTo(dirName)); } - - public void testAttachSniffedMetadataOnClientOk() { - RestClient client = mock(RestClient.class); - List originalNodes = Arrays.asList( - new Node(new HttpHost("1")), - new Node(new HttpHost("2")), - new Node(new HttpHost("3"))); - List nodesWithMetadata = Arrays.asList(new Node[] { - // This node matches exactly: - new Node(new HttpHost("1"), emptySet(), randomAlphaOfLength(5), - randomAlphaOfLength(5), randomRoles()), - // This node also matches exactly but has bound hosts which don't matter: - new Node(new HttpHost("2"), - new HashSet<>(Arrays.asList(new HttpHost("2"), new HttpHost("not2"))), - randomAlphaOfLength(5), randomAlphaOfLength(5), randomRoles()), - // This node's host doesn't match but one of its published hosts does so - // we return a modified version of it: - new Node(new HttpHost("not3"), - new HashSet<>(Arrays.asList(new HttpHost("not3"), new HttpHost("3"))), - randomAlphaOfLength(5), randomAlphaOfLength(5), randomRoles()), - // This node isn't in the original list so it isn't added: - new Node(new HttpHost("4"), emptySet(), randomAlphaOfLength(5), - randomAlphaOfLength(5), randomRoles()), - }); - ESClientYamlSuiteTestCase.attachSniffedMetadataOnClient(client, originalNodes, nodesWithMetadata); - verify(client).setNodes(new Node[] { - nodesWithMetadata.get(0), - nodesWithMetadata.get(1), - nodesWithMetadata.get(2).withHost(new HttpHost("3")), - }); - } - - public void 
testAttachSniffedMetadataOnClientNotEnoughNodes() { - // Try a version of the call that should fail because it doesn't have all the results - RestClient client = mock(RestClient.class); - List originalNodes = Arrays.asList( - new Node(new HttpHost("1")), - new Node(new HttpHost("2"))); - List nodesWithMetadata = Arrays.asList(new Node[] { - // This node matches exactly: - new Node(new HttpHost("1"), emptySet(), "n", "v", new Node.Roles(true, true, true)), - }); - IllegalStateException e = expectThrows(IllegalStateException.class, () -> - ESClientYamlSuiteTestCase.attachSniffedMetadataOnClient(client, originalNodes, nodesWithMetadata)); - assertEquals(e.getMessage(), "Didn't sniff metadata for all nodes. Wanted metadata for " - + "[http://1, http://2] but got [[host=http://1, bound=[], name=n, version=v, roles=mdi]]"); - verify(client, never()).setNodes(any(Node[].class)); - } - - private Node.Roles randomRoles() { - return new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean()); - } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index a5687ab0b7dd0..719044cfc81c2 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -33,7 +33,9 @@ import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import java.util.Map; import static java.util.Collections.emptyList; @@ -523,8 +525,13 @@ public void testNodeSelector() throws IOException { Node v521 = nodeWithVersion("5.2.1"); Node v550 = nodeWithVersion("5.5.0"); Node v612 = nodeWithVersion("6.1.2"); - assertEquals(Arrays.asList(v521, v550), doSection.getApiCallSection().getNodeSelector() - .select(Arrays.asList(v170, v521, v550, v612))); + 
List nodes = new ArrayList<>(); + nodes.add(v170); + nodes.add(v521); + nodes.add(v550); + nodes.add(v612); + doSection.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(Arrays.asList(v521, v550), nodes); ClientYamlTestExecutionContext context = mock(ClientYamlTestExecutionContext.class); ClientYamlTestResponse mockResponse = mock(ClientYamlTestResponse.class); when(context.callApi("indices.get_field_mapping", singletonMap("index", "test_index"), From 2bc05b3b8de77745158afbf2ca3fa41dd00a2551 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 4 Jun 2018 15:43:27 -0400 Subject: [PATCH 12/23] Remove done todo --- .../org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java | 1 - 1 file changed, 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 7fcb61fc9a5b8..99da661402855 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -191,7 +191,6 @@ public ClientYamlTestResponse callApi(String apiName, Map params protected static void setOptions(Request request, Map headers, NodeSelector nodeSelector) { RequestOptions.Builder options = request.getOptions().toBuilder(); - // TODO check that I'm not changing this in this PR. I don't mean to but merge issues. for (Map.Entry header : headers.entrySet()) { logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); options.addHeader(header.getKey(), header.getValue()); From 159ad5680958266dca16a688d695af2fa8bde95a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 5 Jun 2018 17:49:58 -0400 Subject: [PATCH 13/23] Cleanup from review Biggest thing is removing a varargs in favor of a `Collection` version. 
--- .../elasticsearch/client/NodeSelector.java | 18 ++--- .../org/elasticsearch/client/RestClient.java | 38 ++++++----- .../client/RestClientBuilder.java | 7 +- .../client/HostsTrackingFailureListener.java | 9 +-- .../client/NodeSelectorTests.java | 13 ++-- .../client/RequestOptionsTests.java | 2 +- .../client/RestClientMultipleHostsTests.java | 26 +++---- .../client/RestClientSingleHostTests.java | 10 +-- .../elasticsearch/client/RestClientTests.java | 68 ++++++++++++++----- .../elasticsearch/client/sniff/Sniffer.java | 4 +- client/sniffer/src/test/resources/readme.txt | 2 +- .../rest/yaml/ESClientYamlSuiteTestCase.java | 7 -- .../exporter/http/HttpExporterTests.java | 3 - 13 files changed, 121 insertions(+), 86 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java index c9c3f722e5538..c6879296fe8c4 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java +++ b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java @@ -35,18 +35,16 @@ public interface NodeSelector { * iterate the nodes as many times as they need. *

    * This may be called twice per request: first for "living" nodes that - * have not been blacklisted by previous errors. In this case the order - * of the nodes is the order in which the client thinks that they should - * be tried next. If the selector removes all nodes from the list or if - * there aren't any living nodes then the the client will call this method - * with a list of "dead" nodes. In this case the list is sorted "soonest - * to be revived" first. In this case the rest client will only attempt - * the first node. + * have not been blacklisted by previous errors. If the selector removes + * all nodes from the list or if there aren't any living nodes then the + * {@link RestClient} will call this method with a list of "dead" nodes. + *

    + * Implementers should not rely on the ordering of the nodes. */ void select(Iterable nodes); /* * We were fairly careful with our choice of Iterable here. The caller has - * a List but reordering the list is likely to break round robin. Luckilly + * a List but reordering the list is likely to break round robin. Luckily * Iterable doesn't allow any reordering. */ @@ -76,7 +74,9 @@ public void select(Iterable nodes) { for (Iterator itr = nodes.iterator(); itr.hasNext();) { Node node = itr.next(); if (node.getRoles() == null) continue; - if (node.getRoles().isMasterEligible() && false == node.getRoles().isData()) { + if (node.getRoles().isMasterEligible() + && false == node.getRoles().isData() + && false == node.getRoles().isIngest()) { itr.remove(); } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 3bbf36f098964..3a3dcd06705e9 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -85,7 +85,7 @@ *

    * Must be created using {@link RestClientBuilder}, which allows to set all the different options or just rely on defaults. * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later - * by calling {@link #setNodes(Node...)}. + * by calling {@link #setNodes(Collection)}. *

    * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and @@ -113,7 +113,7 @@ public class RestClient implements Closeable { private volatile NodeTuple> nodeTuple; RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders, - Node[] nodes, String pathPrefix, FailureListener failureListener) { + List nodes, String pathPrefix, FailureListener failureListener) { this.client = client; this.maxRetryTimeoutMillis = maxRetryTimeoutMillis; this.defaultHeaders = Collections.unmodifiableList(Arrays.asList(defaultHeaders)); @@ -146,13 +146,22 @@ public static RestClientBuilder builder(HttpHost... hosts) { } /** - * Replaces the nodes with which the client communicates. + * Replaces the hosts with which the client communicates. + * + * @deprecated prefer {@link setNodes} because it allows you + * to set metadata for use with {@link NodeSelector}s */ - public void setNodes(Node... nodes) { - if (nodes == null) { - throw new IllegalArgumentException("nodes must not be null or empty"); + @Deprecated + public void setHosts(HttpHost... 
hosts) { + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("hosts must not be null nor empty"); } - setNodes(Arrays.asList(nodes)); + List nodes = new ArrayList<>(); + for (HttpHost host : hosts) { + nodes.add(new Node( + Objects.requireNonNull(host, "host cannot be null"))); + } + setNodes(nodes); } /** @@ -164,17 +173,15 @@ public synchronized void setNodes(Collection nodes) { } AuthCache authCache = new BasicAuthCache(); - Map nodeSet = new LinkedHashMap<>(); + Map nodesByHost = new LinkedHashMap<>(); for (Node node : nodes) { - if (node == null) { - throw new NullPointerException("node cannot be null"); - } + Objects.requireNonNull(node, "node cannot be null"); // TODO should we throw an IAE if this happens? - nodeSet.put(node.getHost(), node); + nodesByHost.put(node.getHost(), node); authCache.put(node.getHost(), new BasicScheme()); } this.nodeTuple = new NodeTuple<>( - Collections.unmodifiableList(new ArrayList<>(nodeSet.values())), authCache); + Collections.unmodifiableList(new ArrayList<>(nodesByHost.values())), authCache); this.blacklist.clear(); } @@ -612,11 +619,6 @@ private void setHeaders(HttpRequest httpRequest, Collection

    requestHeade * If there are no living nodes that match the {@link NodeSelector} * this will return the dead node that matches the {@link NodeSelector} * that is closest to being revived. - *

    - * If no living and no dead nodes match the selector we retry a few - * times to handle concurrent modifications of the list of dead nodes. - * We never block the thread or {@link Thread#sleep} or anything like - * that. If the retries fail this throws a {@link IOException}. * @throws IOException if no nodes are available */ private NodeTuple> nextNode(NodeSelector nodeSelector) throws IOException { diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java index 38edfc6fedd27..f8084af7402c8 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java @@ -20,7 +20,6 @@ package org.elasticsearch.client; import org.apache.http.Header; -import org.apache.http.HttpHost; import org.apache.http.client.config.RequestConfig; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; @@ -32,6 +31,8 @@ import java.security.AccessController; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedAction; +import java.util.Arrays; +import java.util.List; import java.util.Objects; /** @@ -48,7 +49,7 @@ public final class RestClientBuilder { private static final Header[] EMPTY_HEADERS = new Header[0]; - private final Node[] nodes; + private final List nodes; private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS; private Header[] defaultHeaders = EMPTY_HEADERS; private RestClient.FailureListener failureListener; @@ -70,7 +71,7 @@ public final class RestClientBuilder { throw new IllegalArgumentException("node cannot be null"); } } - this.nodes = nodes; + this.nodes = Arrays.asList(nodes); } /** diff --git a/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java b/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java index 
f882496838dc0..6c952fcf94759 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java +++ b/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java @@ -22,6 +22,7 @@ import org.apache.http.HttpHost; import java.util.HashSet; +import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -39,10 +40,10 @@ public void onFailure(Node node) { hosts.add(node.getHost()); } - void assertCalled(Node... nodes) { - HttpHost[] hosts = new HttpHost[nodes.length]; - for (int i = 0; i < nodes.length; i++) { - hosts[i] = nodes[i].getHost(); + void assertCalled(List nodes) { + HttpHost[] hosts = new HttpHost[nodes.size()]; + for (int i = 0 ; i < nodes.size(); i++) { + hosts[i] = nodes.get(i).getHost(); } assertCalled(hosts); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java index 12f5f5e2cc75e..d9df001ad437e 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.client.Node.Roles; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -42,14 +41,20 @@ public void testAny() { } public void testNotMasterOnly() { - Node masterOnly = dummyNode(true, false, randomBoolean()); - Node masterAndData = dummyNode(true, true, randomBoolean()); - Node coordinatingOnly = dummyNode(false, false, randomBoolean()); + Node masterOnly = dummyNode(true, false, false); + Node all = dummyNode(true, true, true); + Node masterAndData = dummyNode(true, true, false); + Node masterAndIngest = dummyNode(true, false, true); + Node coordinatingOnly = dummyNode(false, false, false); + Node ingestOnly = dummyNode(false, false, true); Node data = dummyNode(false, true, 
randomBoolean()); List nodes = new ArrayList<>(); nodes.add(masterOnly); + nodes.add(all); nodes.add(masterAndData); + nodes.add(masterAndIngest); nodes.add(coordinatingOnly); + nodes.add(ingestOnly); nodes.add(data); Collections.shuffle(nodes, getRandom()); List expected = new ArrayList<>(nodes); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java index 2466e987f4230..a78be6c126bae 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java @@ -131,7 +131,7 @@ private static RequestOptions copy(RequestOptions options) { private static RequestOptions mutate(RequestOptions options) { RequestOptions.Builder mutant = options.toBuilder(); - int mutationType = between(0, 1); + int mutationType = between(0, 2); switch (mutationType) { case 0: mutant.addHeader("extra", "m"); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index f51d3740bf4bd..431b170e59761 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -43,6 +43,7 @@ import java.io.IOException; import java.net.SocketTimeoutException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.Iterator; @@ -76,7 +77,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { private ExecutorService exec = Executors.newFixedThreadPool(1); private RestClient restClient; - private Node[] nodes; + private List nodes; private HostsTrackingFailureListener failureListener; @Before @@ -114,10 +115,11 @@ public void run() { } }); int numNodes = 
RandomNumbers.randomIntBetween(getRandom(), 2, 5); - nodes = new Node[numNodes]; + nodes = new ArrayList<>(numNodes); for (int i = 0; i < numNodes; i++) { - nodes[i] = new Node(new HttpHost("localhost", 9200 + i)); + nodes.add(new Node(new HttpHost("localhost", 9200 + i))); } + nodes = Collections.unmodifiableList(nodes); failureListener = new HostsTrackingFailureListener(); restClient = new RestClient(httpClient, 10000, new Header[0], nodes, null, failureListener); } @@ -134,7 +136,7 @@ public void testRoundRobinOkStatusCodes() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { Set hostsSet = hostsSet(); - for (int j = 0; j < nodes.length; j++) { + for (int j = 0; j < nodes.size(); j++) { int statusCode = randomOkStatusCode(getRandom()); Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode); assertEquals(statusCode, response.getStatusLine().getStatusCode()); @@ -149,7 +151,7 @@ public void testRoundRobinNoRetryErrors() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { Set hostsSet = hostsSet(); - for (int j = 0; j < nodes.length; j++) { + for (int j = 0; j < nodes.size(); j++) { String method = randomHttpMethod(getRandom()); int statusCode = randomErrorNoRetryStatusCode(getRandom()); try { @@ -234,7 +236,7 @@ public void testRoundRobinRetryErrors() throws IOException { for (int i = 1; i <= numIters; i++) { //check that one different host is resurrected at each new attempt Set hostsSet = hostsSet(); - for (int j = 0; j < nodes.length; j++) { + for (int j = 0; j < nodes.size(); j++) { retryEndpoint = randomErrorRetryEndpoint(); try { restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); @@ -314,7 +316,7 @@ public void testNodeSelector() throws IOException { public void select(Iterable restClientNodes) { boolean found = false; for (Iterator itr = 
restClientNodes.iterator(); itr.hasNext();) { - if (nodes[0] == itr.next()) { + if (nodes.get(0) == itr.next()) { found = true; } else { itr.remove(); @@ -334,15 +336,15 @@ public void select(Iterable restClientNodes) { options.setNodeSelector(firstPositionOnly); request.setOptions(options); Response response = restClient.performRequest(request); - assertEquals(nodes[0].getHost(), response.getHost()); + assertEquals(nodes.get(0).getHost(), response.getHost()); } } public void testSetNodes() throws IOException { - Node[] newNodes = new Node[nodes.length]; - for (int i = 0; i < nodes.length; i++) { + List newNodes = new ArrayList<>(nodes.size()); + for (int i = 0; i < nodes.size(); i++) { Roles roles = i == 0 ? new Roles(false, true, true) : new Roles(true, false, false); - newNodes[i] = new Node(nodes[i].getHost(), null, null, null, roles); + newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles)); } restClient.setNodes(newNodes); int rounds = between(1, 10); @@ -356,7 +358,7 @@ public void testSetNodes() throws IOException { options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); request.setOptions(options); Response response = restClient.performRequest(request); - assertEquals(newNodes[0].getHost(), response.getHost()); + assertEquals(newNodes.get(0).getHost(), response.getHost()); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 1226a708a53ed..5987fe7dd9849 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -65,6 +65,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; +import static java.util.Collections.singletonList; import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes; import static 
org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes; @@ -148,7 +149,8 @@ public void run() { defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); node = new Node(new HttpHost("localhost", 9200)); failureListener = new HostsTrackingFailureListener(); - restClient = new RestClient(httpClient, 10000, defaultHeaders, new Node[] {node}, null, failureListener); + restClient = new RestClient(httpClient, 10000, defaultHeaders, + singletonList(node), null, failureListener); } /** @@ -244,7 +246,7 @@ public void testErrorStatusCodes() throws IOException { if (errorStatusCode <= 500 || expectedIgnores.contains(errorStatusCode)) { failureListener.assertNotCalled(); } else { - failureListener.assertCalled(node); + failureListener.assertCalled(singletonList(node)); } } } @@ -259,14 +261,14 @@ public void testIOExceptions() { } catch(IOException e) { assertThat(e, instanceOf(ConnectTimeoutException.class)); } - failureListener.assertCalled(node); + failureListener.assertCalled(singletonList(node)); try { performRequest(method, "/soe"); fail("request should have failed"); } catch(IOException e) { assertThat(e, instanceOf(SocketTimeoutException.class)); } - failureListener.assertCalled(node); + failureListener.assertCalled(singletonList(node)); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 98aa925e1bb93..1c96de5ada082 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.net.URI; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -50,9 +51,9 @@ public class RestClientTests extends RestClientTestCase { public void 
testCloseIsIdempotent() throws IOException { - Node[] nodes = new Node[] {new Node(new HttpHost("localhost", 9200))}; + List nodes = singletonList(new Node(new HttpHost("localhost", 9200))); CloseableHttpAsyncClient closeableHttpAsyncClient = mock(CloseableHttpAsyncClient.class); - RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], nodes, null, null); + RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], nodes, null, null); restClient.close(); verify(closeableHttpAsyncClient, times(1)).close(); restClient.close(); @@ -232,46 +233,77 @@ public void testBuildUriLeavesPathUntouched() { } } + @Deprecated + public void testSetHostsWrongArguments() throws IOException { + try (RestClient restClient = createRestClient()) { + restClient.setHosts((HttpHost[]) null); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setHosts(); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setHosts((HttpHost) null); + fail("setHosts should have failed"); + } catch (NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); + fail("setHosts should have failed"); + } catch (NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); + } + } + public void testSetNodesWrongArguments() throws IOException { try (RestClient restClient = createRestClient()) { - restClient.setNodes((Node[]) null); + restClient.setNodes(null); fail("setNodes should have failed"); } catch (IllegalArgumentException e) { 
assertEquals("nodes must not be null or empty", e.getMessage()); } try (RestClient restClient = createRestClient()) { - restClient.setNodes(); + restClient.setNodes(Collections.emptyList()); fail("setNodes should have failed"); } catch (IllegalArgumentException e) { assertEquals("nodes must not be null or empty", e.getMessage()); } try (RestClient restClient = createRestClient()) { - restClient.setNodes((Node) null); + restClient.setNodes(Collections.singletonList((Node) null)); fail("setNodes should have failed"); } catch (NullPointerException e) { assertEquals("node cannot be null", e.getMessage()); } try (RestClient restClient = createRestClient()) { - restClient.setNodes(new Node(new HttpHost("localhost", 9200)), null, new Node(new HttpHost("localhost", 9201))); + restClient.setNodes(Arrays.asList( + new Node(new HttpHost("localhost", 9200)), + null, + new Node(new HttpHost("localhost", 9201)))); fail("setNodes should have failed"); } catch (NullPointerException e) { assertEquals("node cannot be null", e.getMessage()); } } - public void testSetHostsPreservesOrdering() throws Exception { + public void testSetNodesPreservesOrdering() throws Exception { try (RestClient restClient = createRestClient()) { - Node[] hosts = randomNodes(); - restClient.setNodes(hosts); - assertEquals(Arrays.asList(hosts), restClient.getNodes()); + List nodes = randomNodes(); + restClient.setNodes(nodes); + assertEquals(nodes, restClient.getNodes()); } } - private static Node[] randomNodes() { + private static List randomNodes() { int numNodes = randomIntBetween(1, 10); - Node[] nodes = new Node[numNodes]; - for (int i = 0; i < nodes.length; i++) { - nodes[i] = new Node(new HttpHost("host-" + i, 9200)); + List nodes = new ArrayList<>(numNodes); + for (int i = 0; i < numNodes; i++) { + nodes.add(new Node(new HttpHost("host-" + i, 9200))); } return nodes; } @@ -279,10 +311,10 @@ private static Node[] randomNodes() { public void testSetNodesDuplicatedHosts() throws Exception { try 
(RestClient restClient = createRestClient()) { int numNodes = randomIntBetween(1, 10); - Node[] nodes = new Node[numNodes]; + List nodes = new ArrayList<>(numNodes); Node node = new Node(new HttpHost("host", 9200)); - for (int i = 0; i < nodes.length; i++) { - nodes[i] = node; + for (int i = 0; i < numNodes; i++) { + nodes.add(node); } restClient.setNodes(nodes); assertEquals(1, restClient.getNodes().size()); @@ -448,7 +480,7 @@ public String toString() { } private static RestClient createRestClient() { - Node[] nodes = new Node[] {new Node(new HttpHost("localhost", 9200))}; + List nodes = Collections.singletonList(new Node(new HttpHost("localhost", 9200))); return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000), new Header[] {}, nodes, null, null); } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index 51f5df25f9ceb..73780586e7617 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -21,7 +21,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpHost; import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; @@ -30,6 +29,7 @@ import java.io.IOException; import java.security.AccessController; import java.security.PrivilegedAction; +import java.util.Collection; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.Future; @@ -229,7 +229,7 @@ public void close() { * Returns a new {@link SnifferBuilder} to help with {@link Sniffer} creation. 
* * @param restClient the client that gets its hosts set (via - * {@link RestClient#setNodes(Node...)}) once they are fetched + * {@link RestClient#setNodes(Collection)}) once they are fetched * @return a new instance of {@link SnifferBuilder} */ public static SnifferBuilder builder(RestClient restClient) { diff --git a/client/sniffer/src/test/resources/readme.txt b/client/sniffer/src/test/resources/readme.txt index d3a68f2d41da3..ccb9bb15edb55 100644 --- a/client/sniffer/src/test/resources/readme.txt +++ b/client/sniffer/src/test/resources/readme.txt @@ -1,4 +1,4 @@ -`*_node_http.json` contains files created by spining up toy clusters with a +`*_node_http.json` contains files created by spinning up toy clusters with a few nodes in different configurations locally at various versions. They are for testing `ElasticsearchNodesSniffer` against different versions of Elasticsearch. diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 7bedfb86ce0da..ea4ff5bbe50a5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -397,13 +397,6 @@ protected boolean randomizeContentType() { private List sniffHostMetadata(RestClient client) throws IOException { ElasticsearchNodesSniffer.Scheme scheme = ElasticsearchNodesSniffer.Scheme.valueOf(getProtocol().toUpperCase(Locale.ROOT)); - /* - * We don't want to change the list of nodes that the client communicates with - * because that'd just be rude. So instead we find the nodes - * returned by the sniffer that correspond with the nodes already in the client - * and set the nodes to them. That *shouldn't* change the nodes that the client - * communicates with. 
- */ ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer( adminClient(), ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); return sniffer.sniff(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java index 2d42109c455fa..2c8c700fcf615 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.monitoring.exporter.ClusterAlertsUtil; import org.elasticsearch.xpack.monitoring.exporter.Exporter.Config; import org.junit.Before; -import org.mockito.ArgumentCaptor; import org.mockito.InOrder; import java.io.IOException; @@ -46,8 +45,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyMapOf; -import static org.mockito.Matchers.eq; import static org.mockito.Mockito.atMost; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.inOrder; From baf572c68c0e7cfb26102adbfdc1ca4c785c4c2a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 6 Jun 2018 09:10:17 -0400 Subject: [PATCH 14/23] Fix rotation issue with NodeSelectors Say you have nodes in this order: ``` d1 (data node) m1 (master node) m2 (master node) m3 (master node) d2 (data node) ``` If you used `NodeSelector.NOT_MASTER_ONLY` then the old code would have sent 4x as many requests to `d2` than to `d1` because it rotated the list of nodes *before* sending them to the `NodeSelector`. This moves the sort to after.
It also removes a sort from the dead node case, replacing it with a little sleight of hand and a `Collections.min` which is going to be faster if there are many blacklisted nodes. It'd be rare for there to be hundreds of the things, but we may as well not waste cycles in that case. I'm not super happy that we have to copy both lists before filtering them just so we can throw a nice error message in case they aren't selected but such is life. --- .../org/elasticsearch/client/RestClient.java | 65 +++++-- .../elasticsearch/client/RestClientTests.java | 161 +++++++++--------- 2 files changed, 126 insertions(+), 100 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 3a3dcd06705e9..a20897d4f07fd 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -654,18 +654,18 @@ static List selectHosts(NodeTuple> nodeTuple, if (false == livingNodes.isEmpty()) { /* - * Normal state: there is at least one living node. Rotate the - * list so subsequent requests will prefer the nodes in a - * different order then run them through the NodeSelector so it - * can have its say in which nodes are ok and their ordering. If - * the selector is ok with any over the living nodes then use - * them for the request. + * Normal state: there is at least one living node. If the + * selector is ok with any over the living nodes then use them + * for the request. */ - // TODO this is going to send more requests to nodes right *after* a node that the selector removes List selectedLivingNodes = new ArrayList<>(livingNodes); - Collections.rotate(selectedLivingNodes, lastNodeIndex.getAndIncrement()); nodeSelector.select(selectedLivingNodes); if (false == selectedLivingNodes.isEmpty()) { + /*
+ */ + Collections.rotate(selectedLivingNodes, lastNodeIndex.getAndIncrement()); return selectedLivingNodes; } } @@ -682,21 +682,54 @@ static List selectHosts(NodeTuple> nodeTuple, * node. */ if (false == deadNodes.isEmpty()) { - Collections.sort(deadNodes); - - List selectedDeadNodes = new ArrayList<>(deadNodes.size()); - for (DeadNodeAndRevival n : deadNodes) { - selectedDeadNodes.add(n.node); - } - nodeSelector.select(selectedDeadNodes); + final List selectedDeadNodes = new ArrayList<>(deadNodes); + /* + * We'd like NodeSelectors to remove items directly from deadNodes + * so we can find the minimum after it is filtered without having + * to compare many things. This saves us a sort on the unfiltered + * list. + */ + nodeSelector.select(new Iterable() { + @Override + public Iterator iterator() { + return new Adapter(selectedDeadNodes.iterator()); + } + }); if (false == selectedDeadNodes.isEmpty()) { - return singletonList(selectedDeadNodes.get(0)); + return singletonList(Collections.min(selectedDeadNodes).node); } } throw new IOException("NodeSelector [" + nodeSelector + "] rejected all nodes, " + "living " + livingNodes + " and dead " + deadNodes); } + /** + * Adapts an Iterator into an + * Iterator. + */ + private static class Adapter implements Iterator { + private final Iterator itr; + + private Adapter(Iterator itr) { + this.itr = itr; + } + + @Override + public boolean hasNext() { + return itr.hasNext(); + } + + @Override + public Node next() { + return itr.next().node; + } + + @Override + public void remove() { + itr.remove(); + } + } + /** * Called after each successful request call. * Receives as an argument the host that was used for the successful request. 
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 1c96de5ada082..cb76aa96b8c05 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -340,11 +340,9 @@ public void testNullPath() throws IOException { } public void testSelectHosts() throws IOException { - int iterations = 1000; Node n1 = new Node(new HttpHost("1"), null, null, "1", null); Node n2 = new Node(new HttpHost("2"), null, null, "2", null); Node n3 = new Node(new HttpHost("3"), null, null, "3", null); - List nodes = Arrays.asList(n1, n2, n3); NodeSelector not1 = new NodeSelector() { @Override @@ -376,106 +374,101 @@ public String toString() { } }; - NodeTuple> nodeTuple = new NodeTuple<>(nodes, null); - Map blacklist = new HashMap<>(); - AtomicInteger lastNodeIndex = new AtomicInteger(0); - long now = 0; + NodeTuple> nodeTuple = new NodeTuple<>(Arrays.asList(n1, n2, n3), null); - // Normal case - List expectedNodes = Arrays.asList(n1, n2, n3); - assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, - lastNodeIndex, now, NodeSelector.ANY)); - // Calling it again rotates the set of results - for (int i = 0; i < iterations; i++) { - Collections.rotate(expectedNodes, 1); - assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, - lastNodeIndex, now, NodeSelector.ANY)); - } + Map emptyBlacklist = Collections.emptyMap(); - // Exclude some node - lastNodeIndex.set(0); - // h1 excluded - assertEquals(Arrays.asList(n2, n3), RestClient.selectHosts(nodeTuple, blacklist, - lastNodeIndex, now, not1)); - // Calling it again rotates the set of results - assertEquals(Arrays.asList(n3, n2), RestClient.selectHosts(nodeTuple, blacklist, - lastNodeIndex, now, not1)); - // And again, same - assertEquals(Arrays.asList(n2, n3), RestClient.selectHosts(nodeTuple, blacklist, 
- lastNodeIndex, now, not1)); - /* - * But this time it doesn't because the list being filtered changes - * from (h1, h2, h3) to (h2, h3, h1) which both look the same when - * you filter out h1. - */ - assertEquals(Arrays.asList(n2, n3), RestClient.selectHosts(nodeTuple, blacklist, - lastNodeIndex, now, not1)); + // Normal cases where the node selector doesn't reject all living nodes + assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyBlacklist, 0, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyBlacklist, 0, not1); /* * Try a NodeSelector that excludes all nodes. This should * throw an exception */ - lastNodeIndex.set(0); - try { - RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, now, noNodes); - fail("expected selectHosts to fail"); - } catch (IOException e) { + { String message = "NodeSelector [NONE] rejected all nodes, living [" + "[host=http://1, version=1], [host=http://2, version=2], " + "[host=http://3, version=3]] and dead []"; - assertEquals(message, e.getMessage()); + assertEquals(message, assertSelectAllRejected(nodeTuple, emptyBlacklist, 0, noNodes)); } - /* - * Mark all nodes as dead and look up at a time *after* the - * revival time. This should return all nodes. 
- */ - blacklist.put(n1.getHost(), new DeadHostState(1)); - blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(1), 1)); - blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(1), 1), 1)); - lastNodeIndex.set(0); - now = DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS + 1; - expectedNodes = Arrays.asList(n1, n2, n3); - assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, - now, NodeSelector.ANY)); + // Mark all the nodes dead for a few test cases + { + long now = 0; + Map blacklist = new HashMap<>(); + blacklist.put(n1.getHost(), new DeadHostState(now)); + blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(now), now)); + blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(now), now), now)); + + /* + * selectHosts will revive a single host if regardless of + * blacklist time. It'll revive the node that is closest + * to being revived that the NodeSelector is ok with. + */ + assertEquals(singletonList(n1), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), now, NodeSelector.ANY)); + assertEquals(singletonList(n2), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), now, not1)); + + /* + * Try a NodeSelector that excludes all nodes. This should + * return a failure, but a different failure than when the + * blacklist is empty so that the caller knows that all of + * their nodes are blacklisted AND blocked. + */ + String message = "NodeSelector [NONE] rejected all nodes, living [] and dead [" + + "[host=http://1, version=1], [host=http://2, version=2], " + + "[host=http://3, version=3]]"; + assertEquals(message, assertSelectAllRejected(nodeTuple, blacklist, now, noNodes)); + + /* + * Now lets wind the clock forward, past the timeout for one of + * the dead nodes. We should return it. 
+ */ + now = new DeadHostState(now).getDeadUntilNanos(); + assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, blacklist, 0, NodeSelector.ANY); + + /* + * But if the NodeSelector rejects that node then we'll pick the + * first on that the NodeSelector doesn't reject. + */ + assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, blacklist, 0, not1); + + /* + * If we wind the clock way into the future, past any of the + * blacklist timeouts then we function as though the nodes aren't + * in the blacklist at all. + */ + now += DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS; + assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, blacklist, now, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, blacklist, now, not1); + } + } + + private void assertSelectLivingHosts(List expectedNodes, NodeTuple> nodeTuple, + Map blacklist, long now, NodeSelector nodeSelector) throws IOException { + int iterations = 1000; + AtomicInteger lastNodeIndex = new AtomicInteger(0); + assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, + lastNodeIndex, now, nodeSelector)); // Calling it again rotates the set of results - for (int i = 0; i < iterations; i++) { + for (int i = 1; i < iterations; i++) { Collections.rotate(expectedNodes, 1); - assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, - lastNodeIndex, now, NodeSelector.ANY)); + assertEquals("iteration " + i, expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, + lastNodeIndex, now, nodeSelector)); } + } - /* - * Now try with the nodes dead and *not* past their dead time. - * Only the node closest to revival should come back. - */ - now = 0; - assertEquals(singletonList(n1), RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, - now, NodeSelector.ANY)); - - /* - * Now try with the nodes dead and *not* past their dead time - * *and* a node selector that removes the node that is closest - * to being revived. The second closest node should come back. 
- */ - assertEquals(singletonList(n2), RestClient.selectHosts(nodeTuple, blacklist, - lastNodeIndex, now, not1)); - - /* - * Try a NodeSelector that excludes all nodes. This should - * return a failure, but a different failure than normal - * because it'll block revival rather than outright reject - * healthy nodes. - */ - lastNodeIndex.set(0); + /** + * Assert that {@link RestClient#selectHosts} fails on the provided arguments. + * @return the message in the exception thrown by the failure + */ + private String assertSelectAllRejected( NodeTuple> nodeTuple, + Map blacklist, long now, NodeSelector nodeSelector) { try { - RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, now, noNodes); - fail("expected selectHosts to fail"); + RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(0), now, nodeSelector); + throw new AssertionError("expected selectHosts to fail"); } catch (IOException e) { - String message = "NodeSelector [NONE] rejected all nodes, living [] and dead [" - + "[host=http://1, version=1], [host=http://2, version=2], " - + "[host=http://3, version=3]]"; - assertEquals(message, e.getMessage()); + return e.getMessage(); } } From 1f00b95fcfcd2e444c56aa5d22caf0108001c6fa Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 6 Jun 2018 11:59:29 -0400 Subject: [PATCH 15/23] Wip --- .../elasticsearch/client/DeadHostState.java | 46 +++++++++---- .../org/elasticsearch/client/RestClient.java | 30 ++++----- .../client/DeadHostStateTests.java | 65 ++++++++++++++----- 3 files changed, 98 insertions(+), 43 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java index 53723faccc38d..f9c6959236829 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -33,39 +33,43 @@ final class DeadHostState implements Comparable { private final int 
failedAttempts; private final long deadUntilNanos; + private final TimeSupplier timeSupplier; /** * Build the initial dead state of a host. Useful when a working host stops functioning * and needs to be marked dead after its first failure. In such case the host will be retried after a minute or so. * - * @param now the current time in nanoseconds. Prefer a source designed to measure elapsed time like {@link System#nanoTime()}. + * @param timeSupplier a way to supply the current time and allow for unit testing */ - DeadHostState(long now) { + DeadHostState(TimeSupplier timeSupplier) { this.failedAttempts = 1; - this.deadUntilNanos = now + MIN_CONNECTION_TIMEOUT_NANOS; + this.deadUntilNanos = timeSupplier.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS; + this.timeSupplier = timeSupplier; } /** * Build the dead state of a host given its previous dead state. Useful when a host has been failing before, hence * it already failed for one or more consecutive times. The more failed attempts we register the longer we wait * to retry that same host again. Minimum is 1 minute (for a node the only failed once created - * through {@link #DeadHostState(long)}), maximum is 30 minutes (for a node that failed more than 10 consecutive times) + * through {@link #DeadHostState(TimeSupplier)}), maximum is 30 minutes (for a node that failed more than 10 consecutive times) * - * @param now the current time in nanoseconds. Prefer a source designed to measure elapsed time like {@link System#nanoTime()}. 
+ * @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt */ - DeadHostState(DeadHostState previousDeadHostState, long now) { + DeadHostState(DeadHostState previousDeadHostState) { long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1), MAX_CONNECTION_TIMEOUT_NANOS); - this.deadUntilNanos = now + timeoutNanos; + this.deadUntilNanos = previousDeadHostState.timeSupplier.nanoTime() + timeoutNanos; this.failedAttempts = previousDeadHostState.failedAttempts + 1; + this.timeSupplier = previousDeadHostState.timeSupplier; } /** - * The number of nanoseconds until this host should be revived. - * Negative values mean that we can revive the host now. + * Indicates whether it's time to retry to failed host or not. + * + * @return true if the host should be retried, false otherwise */ - long nanosUntilRevival(long nowInNanos) { - return nowInNanos - deadUntilNanos; + boolean shallBeRetried() { + return timeSupplier.nanoTime() - deadUntilNanos > 0; } /** @@ -90,6 +94,26 @@ public String toString() { return "DeadHostState{" + "failedAttempts=" + failedAttempts + ", deadUntilNanos=" + deadUntilNanos + + ", timeSupplier=" + timeSupplier + '}'; } + + /** + * Time supplier that makes timing aspects pluggable to ease testing + */ + interface TimeSupplier { + TimeSupplier DEFAULT = new TimeSupplier() { + @Override + public long nanoTime() { + return System.nanoTime(); + } + + @Override + public String toString() { + return "nanoTime"; + } + }; + + long nanoTime(); + } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index a20897d4f07fd..d338297d65fb0 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -46,6 +46,7 @@ import 
org.apache.http.nio.client.methods.HttpAsyncMethods; import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.elasticsearch.client.DeadHostState.TimeSupplier; import javax.net.ssl.SSLHandshakeException; @@ -637,19 +638,18 @@ static List selectHosts(NodeTuple> nodeTuple, * Sort the nodes into living and dead lists. */ List livingNodes = new ArrayList<>(nodeTuple.nodes.size() - blacklist.size()); - List deadNodes = new ArrayList<>(blacklist.size()); + List deadNodes = new ArrayList<>(blacklist.size()); for (Node node : nodeTuple.nodes) { DeadHostState deadness = blacklist.get(node.getHost()); if (deadness == null) { livingNodes.add(node); continue; } - long nanosUntilRevival = deadness.nanosUntilRevival(now); - if (nanosUntilRevival > 0) { + if (deadness.shallBeRetried()) { livingNodes.add(node); continue; } - deadNodes.add(new DeadNodeAndRevival(node, nanosUntilRevival)); + deadNodes.add(new DeadNodeAndDeadness(node, deadness)); } if (false == livingNodes.isEmpty()) { @@ -682,7 +682,7 @@ static List selectHosts(NodeTuple> nodeTuple, * node. */ if (false == deadNodes.isEmpty()) { - final List selectedDeadNodes = new ArrayList<>(deadNodes); + final List selectedDeadNodes = new ArrayList<>(deadNodes); /* * We'd like NodeSelectors to remove items directly from deadNodes * so we can find the minimum after it is filtered without having @@ -708,9 +708,9 @@ public Iterator iterator() { * Iterator. 
*/ private static class Adapter implements Iterator { - private final Iterator itr; + private final Iterator itr; - private Adapter(Iterator itr) { + private Adapter(Iterator itr) { this.itr = itr; } @@ -748,7 +748,7 @@ private void onResponse(Node node) { private void onFailure(Node node) { while(true) { DeadHostState previousDeadHostState = - blacklist.putIfAbsent(node.getHost(), new DeadHostState(System.nanoTime())); + blacklist.putIfAbsent(node.getHost(), new DeadHostState(TimeSupplier.DEFAULT)); if (previousDeadHostState == null) { if (logger.isDebugEnabled()) { logger.debug("added [" + node + "] to blacklist"); @@ -756,7 +756,7 @@ private void onFailure(Node node) { break; } if (blacklist.replace(node.getHost(), previousDeadHostState, - new DeadHostState(previousDeadHostState, System.nanoTime()))) { + new DeadHostState(previousDeadHostState))) { if (logger.isDebugEnabled()) { logger.debug("updated [" + node + "] already in blacklist"); } @@ -1025,13 +1025,13 @@ static class NodeTuple { * Contains a reference to a blacklisted node and the time until it is * revived. We use this so we can do a single pass over the blacklist. 
*/ - private static class DeadNodeAndRevival implements Comparable { + private static class DeadNodeAndDeadness implements Comparable { final Node node; - final long nanosUntilRevival; + final DeadHostState deadness; - DeadNodeAndRevival(Node node, long nanosUntilRevival) { + DeadNodeAndDeadness(Node node, DeadHostState deadness) { this.node = node; - this.nanosUntilRevival = nanosUntilRevival; + this.deadness = deadness; } @Override @@ -1040,8 +1040,8 @@ public String toString() { } @Override - public int compareTo(DeadNodeAndRevival rhs) { - return Long.compare(rhs.nanosUntilRevival, nanosUntilRevival); + public int compareTo(DeadNodeAndDeadness rhs) { + return deadness.compareTo(rhs.deadness); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java index 958a62401d458..ed3fda5bd3289 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java @@ -21,29 +21,32 @@ import java.util.concurrent.TimeUnit; +import org.elasticsearch.client.DeadHostState.TimeSupplier; + import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; public class DeadHostStateTests extends RestClientTestCase { private static long[] EXPECTED_TIMEOUTS_SECONDS = new long[]{60, 84, 120, 169, 240, 339, 480, 678, 960, 1357, 1800}; public void testInitialDeadHostStateDefaultTimeSupplier() { - DeadHostState deadHostState = new DeadHostState(System.nanoTime()); + DeadHostState deadHostState = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); long currentTime = 
System.nanoTime(); assertThat(deadHostState.getDeadUntilNanos(), greaterThan(currentTime)); assertThat(deadHostState.getFailedAttempts(), equalTo(1)); } public void testDeadHostStateFromPreviousDefaultTimeSupplier() { - DeadHostState previous = new DeadHostState(System.nanoTime()); + DeadHostState previous = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); int iters = randomIntBetween(5, 30); for (int i = 0; i < iters; i++) { - DeadHostState deadHostState = new DeadHostState(previous, System.nanoTime()); + DeadHostState deadHostState = new DeadHostState(previous); assertThat(deadHostState.getDeadUntilNanos(), greaterThan(previous.getDeadUntilNanos())); assertThat(deadHostState.getFailedAttempts(), equalTo(previous.getFailedAttempts() + 1)); previous = deadHostState; @@ -55,9 +58,9 @@ public void testCompareToDefaultTimeSupplier() { DeadHostState[] deadHostStates = new DeadHostState[numObjects]; for (int i = 0; i < numObjects; i++) { if (i == 0) { - deadHostStates[i] = new DeadHostState(System.nanoTime()); + deadHostStates[i] = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); } else { - deadHostStates[i] = new DeadHostState(deadHostStates[i - 1], System.nanoTime()); + deadHostStates[i] = new DeadHostState(deadHostStates[i - 1]); } } for (int k = 1; k < deadHostStates.length; k++) { @@ -66,41 +69,69 @@ public void testCompareToDefaultTimeSupplier() { } } - public void testNanosUntilRevival() { + public void testCompareToDifferingTimeSupplier() { + try { + new DeadHostState(TimeSupplier.DEFAULT).compareTo( + new DeadHostState(new ConfigurableTimeSupplier())); + fail("expected failure"); + } catch (IllegalArgumentException e) { + assertEquals("can't compare DeadHostStates with different clocks [nanoTime != configured[0]]", + e.getMessage()); + } + } + + public void testShallBeRetried() { + ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier(); DeadHostState deadHostState = null; for (int i = 0; i < EXPECTED_TIMEOUTS_SECONDS.length; i++) { 
long expectedTimeoutSecond = EXPECTED_TIMEOUTS_SECONDS[i]; - long now = 0; + timeSupplier.nanoTime = 0; if (i == 0) { - deadHostState = new DeadHostState(0); + deadHostState = new DeadHostState(timeSupplier); } else { - deadHostState = new DeadHostState(deadHostState, 0); + deadHostState = new DeadHostState(deadHostState); } for (int j = 0; j < expectedTimeoutSecond; j++) { - now += TimeUnit.SECONDS.toNanos(1); - assertThat(deadHostState.nanosUntilRevival(now), lessThanOrEqualTo(0L)); + timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1); + assertThat(deadHostState.shallBeRetried(), is(false)); } int iters = randomIntBetween(5, 30); for (int j = 0; j < iters; j++) { - now += TimeUnit.SECONDS.toNanos(1); - assertThat(deadHostState.nanosUntilRevival(now), greaterThan(0L)); + timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1); + assertThat(deadHostState.shallBeRetried(), is(true)); } } } public void testDeadHostStateTimeouts() { - DeadHostState previous = new DeadHostState(0); + ConfigurableTimeSupplier zeroTimeSupplier = new ConfigurableTimeSupplier(); + zeroTimeSupplier.nanoTime = 0L; + DeadHostState previous = new DeadHostState(zeroTimeSupplier); for (long expectedTimeoutsSecond : EXPECTED_TIMEOUTS_SECONDS) { assertThat(TimeUnit.NANOSECONDS.toSeconds(previous.getDeadUntilNanos()), equalTo(expectedTimeoutsSecond)); - previous = new DeadHostState(previous, 0); + previous = new DeadHostState(previous); } //check that from here on the timeout does not increase int iters = randomIntBetween(5, 30); for (int i = 0; i < iters; i++) { - DeadHostState deadHostState = new DeadHostState(previous, 0); + DeadHostState deadHostState = new DeadHostState(previous); assertThat(TimeUnit.NANOSECONDS.toSeconds(deadHostState.getDeadUntilNanos()), equalTo(EXPECTED_TIMEOUTS_SECONDS[EXPECTED_TIMEOUTS_SECONDS.length - 1])); previous = deadHostState; } } + + private static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier { + long nanoTime; + + @Override + public long 
nanoTime() { + return nanoTime; + } + + @Override + public String toString() { + return "configured[" + nanoTime + "]"; + } + } } From a6a5b4618492b7386d09d81cd93ab321d05f6bff Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 8 Jun 2018 16:15:08 -0400 Subject: [PATCH 16/23] Cleanup --- .../elasticsearch/client/DeadHostState.java | 4 ++ .../org/elasticsearch/client/RestClient.java | 4 +- .../client/DeadHostStateTests.java | 2 +- .../elasticsearch/client/RestClientTests.java | 46 +++++++++---------- .../sniff/ElasticsearchNodesSnifferTests.java | 1 + 5 files changed, 31 insertions(+), 26 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java index f9c6959236829..2a62adb285ad6 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -86,6 +86,10 @@ int getFailedAttempts() { @Override public int compareTo(DeadHostState other) { + if (timeSupplier != other.timeSupplier) { + throw new IllegalArgumentException("can't compare DeadHostStates with different clocks [" + + timeSupplier + " != " + other.timeSupplier + "]"); + } return Long.compare(deadUntilNanos, other.deadUntilNanos); } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index d338297d65fb0..7625e5c3a3ab4 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -624,7 +624,7 @@ private void setHeaders(HttpRequest httpRequest, Collection

    requestHeade */ private NodeTuple> nextNode(NodeSelector nodeSelector) throws IOException { NodeTuple> nodeTuple = this.nodeTuple; - List hosts = selectHosts(nodeTuple, blacklist, lastNodeIndex, System.nanoTime(), nodeSelector); + List hosts = selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector); return new NodeTuple<>(hosts.iterator(), nodeTuple.authCache); } @@ -633,7 +633,7 @@ private NodeTuple> nextNode(NodeSelector nodeSelector) throws IOE */ static List selectHosts(NodeTuple> nodeTuple, Map blacklist, AtomicInteger lastNodeIndex, - long now, NodeSelector nodeSelector) throws IOException { + NodeSelector nodeSelector) throws IOException { /* * Sort the nodes into living and dead lists. */ diff --git a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java index ed3fda5bd3289..daea27f896328 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java @@ -121,7 +121,7 @@ public void testDeadHostStateTimeouts() { } } - private static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier { + static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier { long nanoTime; @Override diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index cb76aa96b8c05..8741525c79984 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -22,6 +22,7 @@ import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import org.elasticsearch.client.DeadHostStateTests.ConfigurableTimeSupplier; import org.elasticsearch.client.RestClient.NodeTuple; import 
java.io.IOException; @@ -379,8 +380,8 @@ public String toString() { Map emptyBlacklist = Collections.emptyMap(); // Normal cases where the node selector doesn't reject all living nodes - assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyBlacklist, 0, NodeSelector.ANY); - assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyBlacklist, 0, not1); + assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyBlacklist, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyBlacklist, not1); /* * Try a NodeSelector that excludes all nodes. This should @@ -390,24 +391,24 @@ public String toString() { String message = "NodeSelector [NONE] rejected all nodes, living [" + "[host=http://1, version=1], [host=http://2, version=2], " + "[host=http://3, version=3]] and dead []"; - assertEquals(message, assertSelectAllRejected(nodeTuple, emptyBlacklist, 0, noNodes)); + assertEquals(message, assertSelectAllRejected(nodeTuple, emptyBlacklist, noNodes)); } // Mark all the nodes dead for a few test cases { - long now = 0; + ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier(); Map blacklist = new HashMap<>(); - blacklist.put(n1.getHost(), new DeadHostState(now)); - blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(now), now)); - blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(now), now), now)); + blacklist.put(n1.getHost(), new DeadHostState(timeSupplier)); + blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier))); + blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier)))); /* * selectHosts will revive a single host if regardless of * blacklist time. It'll revive the node that is closest * to being revived that the NodeSelector is ok with. 
*/ - assertEquals(singletonList(n1), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), now, NodeSelector.ANY)); - assertEquals(singletonList(n2), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), now, not1)); + assertEquals(singletonList(n1), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), NodeSelector.ANY)); + assertEquals(singletonList(n2), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), not1)); /* * Try a NodeSelector that excludes all nodes. This should @@ -418,43 +419,42 @@ public String toString() { String message = "NodeSelector [NONE] rejected all nodes, living [] and dead [" + "[host=http://1, version=1], [host=http://2, version=2], " + "[host=http://3, version=3]]"; - assertEquals(message, assertSelectAllRejected(nodeTuple, blacklist, now, noNodes)); + assertEquals(message, assertSelectAllRejected(nodeTuple, blacklist, noNodes)); /* * Now lets wind the clock forward, past the timeout for one of * the dead nodes. We should return it. */ - now = new DeadHostState(now).getDeadUntilNanos(); - assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, blacklist, 0, NodeSelector.ANY); + timeSupplier.nanoTime = new DeadHostState(timeSupplier).getDeadUntilNanos(); + assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, blacklist, NodeSelector.ANY); /* * But if the NodeSelector rejects that node then we'll pick the * first on that the NodeSelector doesn't reject. */ - assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, blacklist, 0, not1); + assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, blacklist, not1); /* * If we wind the clock way into the future, past any of the * blacklist timeouts then we function as though the nodes aren't * in the blacklist at all. 
*/ - now += DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS; - assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, blacklist, now, NodeSelector.ANY); - assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, blacklist, now, not1); + timeSupplier.nanoTime += DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS; + assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, blacklist, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, blacklist, not1); } } private void assertSelectLivingHosts(List expectedNodes, NodeTuple> nodeTuple, - Map blacklist, long now, NodeSelector nodeSelector) throws IOException { + Map blacklist, NodeSelector nodeSelector) throws IOException { int iterations = 1000; AtomicInteger lastNodeIndex = new AtomicInteger(0); - assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, - lastNodeIndex, now, nodeSelector)); + assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector)); // Calling it again rotates the set of results for (int i = 1; i < iterations; i++) { Collections.rotate(expectedNodes, 1); - assertEquals("iteration " + i, expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, - lastNodeIndex, now, nodeSelector)); + assertEquals("iteration " + i, expectedNodes, + RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector)); } } @@ -463,9 +463,9 @@ private void assertSelectLivingHosts(List expectedNodes, NodeTuple> nodeTuple, - Map blacklist, long now, NodeSelector nodeSelector) { + Map blacklist, NodeSelector nodeSelector) { try { - RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(0), now, nodeSelector); + RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(0), nodeSelector); throw new AssertionError("expected selectHosts to fail"); } catch (IOException e) { return e.getMessage(); diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java 
b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java index 452b716badb43..e8efdc7077089 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java @@ -30,6 +30,7 @@ import org.apache.http.Consts; import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Node; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; From 7d6ee4fe481596883634976635f1948641dbb892 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 8 Jun 2018 16:38:36 -0400 Subject: [PATCH 17/23] Reuse method --- .../org/elasticsearch/client/RestClient.java | 24 ++++++------------- .../client/RestClientBuilder.java | 7 +++--- .../client/RestClientBuilderTests.java | 4 ++-- .../elasticsearch/client/RestClientTests.java | 4 ++-- .../client/sniff/SnifferTests.java | 2 +- 5 files changed, 15 insertions(+), 26 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 7625e5c3a3ab4..c7881e660c64e 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -62,12 +62,10 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; -import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -131,7 +129,7 @@ public class RestClient implements Closeable { * If you don't either one is fine. */ public static RestClientBuilder builder(Node... 
nodes) { - return new RestClientBuilder(nodes); + return new RestClientBuilder(nodes == null ? null : Arrays.asList(nodes)); } /** @@ -154,15 +152,7 @@ public static RestClientBuilder builder(HttpHost... hosts) { */ @Deprecated public void setHosts(HttpHost... hosts) { - if (hosts == null || hosts.length == 0) { - throw new IllegalArgumentException("hosts must not be null nor empty"); - } - List nodes = new ArrayList<>(); - for (HttpHost host : hosts) { - nodes.add(new Node( - Objects.requireNonNull(host, "host cannot be null"))); - } - setNodes(nodes); + setNodes(hostsToNodes(hosts)); } /** @@ -186,13 +176,13 @@ public synchronized void setNodes(Collection nodes) { this.blacklist.clear(); } - private static Node[] hostsToNodes(HttpHost[] hosts) { + private static List hostsToNodes(HttpHost[] hosts) { if (hosts == null || hosts.length == 0) { - throw new IllegalArgumentException("hosts must not be null or empty"); + throw new IllegalArgumentException("hosts must not be null nor empty"); } - Node[] nodes = new Node[hosts.length]; + List nodes = new ArrayList<>(hosts.length); for (int i = 0; i < hosts.length; i++) { - nodes[i] = new Node(hosts[i]); + nodes.add(new Node(hosts[i])); } return nodes; } @@ -1008,7 +998,7 @@ public void onFailure(Node node) {} } /** - * {@link NodeTupe} enables the {@linkplain Node}s and {@linkplain AuthCache} + * {@link NodeTuple} enables the {@linkplain Node}s and {@linkplain AuthCache} * to be set together in a thread safe, volatile way. 
*/ static class NodeTuple { diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java index f8084af7402c8..17d27248dfea9 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java @@ -31,7 +31,6 @@ import java.security.AccessController; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedAction; -import java.util.Arrays; import java.util.List; import java.util.Objects; @@ -62,8 +61,8 @@ public final class RestClientBuilder { * * @throws IllegalArgumentException if {@code nodes} is {@code null} or empty. */ - RestClientBuilder(Node[] nodes) { - if (nodes == null || nodes.length == 0) { + RestClientBuilder(List nodes) { + if (nodes == null || nodes.isEmpty()) { throw new IllegalArgumentException("nodes must not be null or empty"); } for (Node node : nodes) { @@ -71,7 +70,7 @@ public final class RestClientBuilder { throw new IllegalArgumentException("node cannot be null"); } } - this.nodes = Arrays.asList(nodes); + this.nodes = nodes; } /** diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 39be4db3b98ee..9fcb4978e28a7 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -40,14 +40,14 @@ public void testBuild() throws IOException { RestClient.builder((HttpHost[])null); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals("hosts must not be null or empty", e.getMessage()); + assertEquals("hosts must not be null nor empty", e.getMessage()); } try { RestClient.builder(new HttpHost[] {}); fail("should have failed"); } catch(IllegalArgumentException e) { - 
assertEquals("hosts must not be null or empty", e.getMessage()); + assertEquals("hosts must not be null nor empty", e.getMessage()); } try { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 8741525c79984..01f6f308f6227 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -251,13 +251,13 @@ public void testSetHostsWrongArguments() throws IOException { try (RestClient restClient = createRestClient()) { restClient.setHosts((HttpHost) null); fail("setHosts should have failed"); - } catch (NullPointerException e) { + } catch (IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } try (RestClient restClient = createRestClient()) { restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); fail("setHosts should have failed"); - } catch (NullPointerException e) { + } catch (IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index 1d4542c99050a..9c54a20af2175 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -670,7 +670,7 @@ static final void assertNodesEquals(List expected, List actual) { assertEquals(expectedNode.getRoles(), actualNode.getRoles()); } } catch (AssertionError e) { - throw new AssertionError("nodes differ expected: " + expected + " but was: " + actual, e); + throw new AssertionError("nodes differ, expected: " + expected + " but was: " + actual, e); } } } From 25280ef46ebf3a19459457788a909d2f13315822 Mon Sep 17 00:00:00 2001 From: Nik Everett 
Date: Mon, 11 Jun 2018 11:42:04 -0400 Subject: [PATCH 18/23] Docs! --- .../RestClientDocumentation.java | 46 +++++++++---------- .../high-level/getting-started.asciidoc | 10 ++++ docs/java-rest/low-level/usage.asciidoc | 35 ++++++++++++-- 3 files changed, 63 insertions(+), 28 deletions(-) diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index d71bfa78937a1..b90c4c0956ba3 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -37,6 +37,7 @@ import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; +import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; @@ -74,6 +75,19 @@ */ @SuppressWarnings("unused") public class RestClientDocumentation { + private static final String TOKEN = "DUMMY"; + + // tag::rest-client-options-singleton + private static final RequestOptions COMMON_OPTIONS; + static { + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + builder.addHeader("Authorization", "Bearer " + TOKEN); // <1> + builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); // <2> + builder.setHttpAsyncResponseConsumerFactory( // <3> + new HeapBufferedResponseConsumerFactory(30 * 1024 * 1024 * 1024)); + COMMON_OPTIONS = builder.build(); + } + // end::rest-client-options-singleton @SuppressWarnings("unused") public void testUsage() throws IOException, InterruptedException { @@ -174,30 +188,14 @@ public void onFailure(Exception exception) { //tag::rest-client-body-shorter 
request.setJsonEntity("{\"json\":\"text\"}"); //end::rest-client-body-shorter - { - //tag::rest-client-headers - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.addHeader("Accept", "text/plain"); - options.addHeader("Cache-Control", "no-cache"); - request.setOptions(options); - //end::rest-client-headers - } - { - //tag::rest-client-response-consumer - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.setHttpAsyncResponseConsumerFactory( - new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024)); - request.setOptions(options); - //end::rest-client-response-consumer - } - { - //tag::rest-client-node-selector - // TODO link me to docs - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); - request.setOptions(options); - //end::rest-client-node-selector - } + //tag::rest-client-options-set-singleton + request.setOptions(COMMON_OPTIONS); + //end::rest-client-options-set-singleton + //tag::rest-client-options-customize + RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); + options.addHeader("cats", "knock things off of other things"); + request.setOptions(options); + //end::rest-client-options-customize } { HttpEntity[] documents = new HttpEntity[10]; diff --git a/docs/java-rest/high-level/getting-started.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc index 14a5058eb7272..3e9b9fa7ea08f 100644 --- a/docs/java-rest/high-level/getting-started.asciidoc +++ b/docs/java-rest/high-level/getting-started.asciidoc @@ -144,3 +144,13 @@ include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[rest-high-level-cl In the rest of this documentation about the Java High Level Client, the `RestHighLevelClient` instance will be referenced as `client`. 
+ +[[java-rest-hight-getting-started-request-options]] +=== RequestOptions + +All APIs in the `RestHighLevelClient` accept a `RequestOptions` which you can +use to customize the request in ways that won't change how Elasticsearch +executes the request. For example, this is the place where you'd specify a +`NodeSelector` to control which node receives the request. See the +<> for +more examples of customizing the options. diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 012ce418226cd..2e6f264d3b00b 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -271,24 +271,51 @@ a `ContentType` of `application/json`. include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body-shorter] -------------------------------------------------- -And you can add one or more headers to send with the request: +[[java-rest-low-usage-request-options]] +==== RequestOptions + +The `RequestOptions` class holds parts of the request that should be shared +between many requests in the same application. You can make a singleton +instance and share it between all requests: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-singleton] -------------------------------------------------- +<1> Add any headers needed by all requests. +<2> Set a `NodeSelector`. +<3> Customize the response consumer. + +`addHeader` is for headers that are required for authorization or to work with +a proxy that in front of Elasticsearch. There is no need to set the +`Content-Type` header because the client will automatically set that from the +`HttpEntity` attached to the request. + +You can set the `NodeSelector` which controls which nodes will receive +requests. 
`NodeSelector.NOT_MASTER_ONLY` is a good choice. You can also customize the response consumer used to buffer the asynchronous responses. The default consumer will buffer up to 100MB of response on the JVM heap. If the response is larger then the request will fail. You could, for example, lower the maximum size which might be useful if you are running -in a heap constrained environment: +in a heap constrained environment like the example above. + +Once you've created the singleton you can use it when making requests: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-set-singleton] +-------------------------------------------------- + +You can also customize these options on a per request basis. For example, this +adds an extra header: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize] -------------------------------------------------- + ==== Multiple parallel asynchronous actions The client is quite happy to execute many actions in parallel. 
The following From 5eca2904abb20a514b1a35a317b8a1b19bf05782 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 11 Jun 2018 11:49:44 -0400 Subject: [PATCH 19/23] Fix import order --- .../src/main/java/org/elasticsearch/client/RestClient.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index c7881e660c64e..e5ad34e8206fd 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -48,10 +48,6 @@ import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; import org.elasticsearch.client.DeadHostState.TimeSupplier; -import javax.net.ssl.SSLHandshakeException; - -import static java.util.Collections.singletonList; - import java.io.Closeable; import java.io.IOException; import java.net.ConnectException; @@ -78,6 +74,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import javax.net.ssl.SSLHandshakeException; + +import static java.util.Collections.singletonList; /** * Client that connects to an Elasticsearch cluster through HTTP. 
From 0ab1b30a67d219c30cf3f90e77880176a5010909 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 11 Jun 2018 12:01:33 -0400 Subject: [PATCH 20/23] Cleanups --- .../org/elasticsearch/client/RestClient.java | 70 +++++++++---------- .../RestClientMultipleHostsIntegTests.java | 2 +- .../rest/yaml/ESClientYamlSuiteTestCase.java | 11 ++- 3 files changed, 45 insertions(+), 38 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index e5ad34e8206fd..82039cab5d04c 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -166,7 +166,7 @@ public synchronized void setNodes(Collection nodes) { Map nodesByHost = new LinkedHashMap<>(); for (Node node : nodes) { Objects.requireNonNull(node, "node cannot be null"); - // TODO should we throw an IAE if this happens? + // TODO should we throw an IAE if we have two nodes with the same host? nodesByHost.put(node.getHost(), node); authCache.put(node.getHost(), new BasicScheme()); } @@ -627,7 +627,7 @@ static List selectHosts(NodeTuple> nodeTuple, * Sort the nodes into living and dead lists. */ List livingNodes = new ArrayList<>(nodeTuple.nodes.size() - blacklist.size()); - List deadNodes = new ArrayList<>(blacklist.size()); + List deadNodes = new ArrayList<>(blacklist.size()); for (Node node : nodeTuple.nodes) { DeadHostState deadness = blacklist.get(node.getHost()); if (deadness == null) { @@ -638,7 +638,7 @@ static List selectHosts(NodeTuple> nodeTuple, livingNodes.add(node); continue; } - deadNodes.add(new DeadNodeAndDeadness(node, deadness)); + deadNodes.add(new DeadNode(node, deadness)); } if (false == livingNodes.isEmpty()) { @@ -671,7 +671,7 @@ static List selectHosts(NodeTuple> nodeTuple, * node. 
*/ if (false == deadNodes.isEmpty()) { - final List selectedDeadNodes = new ArrayList<>(deadNodes); + final List selectedDeadNodes = new ArrayList<>(deadNodes); /* * We'd like NodeSelectors to remove items directly from deadNodes * so we can find the minimum after it is filtered without having @@ -681,7 +681,7 @@ static List selectHosts(NodeTuple> nodeTuple, nodeSelector.select(new Iterable() { @Override public Iterator iterator() { - return new Adapter(selectedDeadNodes.iterator()); + return new DeadNodeIteratorAdapter(selectedDeadNodes.iterator()); } }); if (false == selectedDeadNodes.isEmpty()) { @@ -692,33 +692,6 @@ public Iterator iterator() { + "living " + livingNodes + " and dead " + deadNodes); } - /** - * Adapts an Iterator into an - * Iterator. - */ - private static class Adapter implements Iterator { - private final Iterator itr; - - private Adapter(Iterator itr) { - this.itr = itr; - } - - @Override - public boolean hasNext() { - return itr.hasNext(); - } - - @Override - public Node next() { - return itr.next().node; - } - - @Override - public void remove() { - itr.remove(); - } - } - /** * Called after each successful request call. * Receives as an argument the host that was used for the successful request. @@ -1014,11 +987,11 @@ static class NodeTuple { * Contains a reference to a blacklisted node and the time until it is * revived. We use this so we can do a single pass over the blacklist. 
*/ - private static class DeadNodeAndDeadness implements Comparable { + private static class DeadNode implements Comparable { final Node node; final DeadHostState deadness; - DeadNodeAndDeadness(Node node, DeadHostState deadness) { + DeadNode(Node node, DeadHostState deadness) { this.node = node; this.deadness = deadness; } @@ -1029,11 +1002,38 @@ public String toString() { } @Override - public int compareTo(DeadNodeAndDeadness rhs) { + public int compareTo(DeadNode rhs) { return deadness.compareTo(rhs.deadness); } } + /** + * Adapts an Iterator into an + * Iterator. + */ + private static class DeadNodeIteratorAdapter implements Iterator { + private final Iterator itr; + + private DeadNodeIteratorAdapter(Iterator itr) { + this.itr = itr; + } + + @Override + public boolean hasNext() { + return itr.hasNext(); + } + + @Override + public Node next() { + return itr.next().node; + } + + @Override + public void remove() { + itr.remove(); + } + } + /** * Add all headers from the provided varargs argument to a {@link Request}. This only exists * to support methods that exist for backwards compatibility. 
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index acd57e40ae17a..1c68bc5deb7bc 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -77,7 +77,7 @@ public static void startHttpServer() throws Exception { httpServers[i] = httpServer; httpHosts[i] = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); } - restClient = buildRestClient(); + restClient = buildRestClient(); } private static RestClient buildRestClient() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index ea4ff5bbe50a5..c0b5b1e95886c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -53,10 +53,17 @@ import java.util.Locale; import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; /** - * Runs a suite of yaml tests shared with all the official Elasticsearch clients against against an elasticsearch cluster. + * Runs a suite of yaml tests shared with all the official Elasticsearch + * clients against an elasticsearch cluster. + *

    + * IMPORTANT: These tests sniff the cluster for metadata + * and hosts on startup and replace the list of hosts that they are + * configured to use with the list sniffed from the cluster. So you can't + * control which nodes receive the request by providing the right list of + * nodes in the tests.rest.cluster system property. Instead + * the tests must explicitly use `node_selector`s. */ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { From a9e682f37b3e3caa9da68889e25299e4bdf98ae3 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 11 Jun 2018 12:06:55 -0400 Subject: [PATCH 21/23] Revert "Drop equals from Node" This reverts commit d551dcbd238d5806ce30aeae9179f931afb2cd23. --- .../java/org/elasticsearch/client/Node.java | 18 +++++++ .../elasticsearch/client/NodeSelector.java | 2 +- .../org/elasticsearch/client/NodeTests.java | 25 ++++++++++ .../ElasticsearchNodesSnifferParseTests.java | 47 ++++++++----------- .../sniff/ElasticsearchNodesSnifferTests.java | 3 +- .../client/sniff/SnifferTests.java | 28 ++--------- 6 files changed, 69 insertions(+), 54 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java index 0f7050f13667e..231ff0e813fc2 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Node.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Node.java @@ -156,6 +156,24 @@ public String toString() { return b.append(']').toString(); } + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Node other = (Node) obj; + return host.equals(other.host) + && Objects.equals(boundHosts, other.boundHosts) + && Objects.equals(version, other.version) + && Objects.equals(name, other.name) + && Objects.equals(roles, other.roles); + } + + @Override + public int hashCode() { + return Objects.hash(host, boundHosts, name, version, roles); + } + /** * Role information 
about an Elasticsearch process. */ diff --git a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java index c6879296fe8c4..5f5296fe16b13 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java +++ b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java @@ -66,7 +66,7 @@ public String toString() { /** * Selector that matches any node that has metadata and doesn't * have the {@code master} role OR it has the data {@code data} - * role. It does not reorder the nodes sent to it. + * role. */ NodeSelector NOT_MASTER_ONLY = new NodeSelector() { @Override diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java index 72f22eabb5ae8..989861df50293 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java @@ -25,7 +25,10 @@ import java.util.Arrays; import java.util.HashSet; +import static java.util.Collections.singleton; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; public class NodeTests extends RestClientTestCase { public void testWithHost() { @@ -61,4 +64,26 @@ public void testToString() { "nam", "ver", new Roles(true, false, false)).toString()); } + + public void testEqualsAndHashCode() { + HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5)); + Node node = new Node(host, + randomBoolean() ? null : singleton(host), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? 
null : new Roles(true, true, true)); + assertFalse(node.equals(null)); + assertTrue(node.equals(node)); + assertEquals(node.hashCode(), node.hashCode()); + Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles()); + assertTrue(node.equals(copy)); + assertEquals(node.hashCode(), copy.hashCode()); + assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(), + node.getName(), node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))), + node.getName(), node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false)))); + } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java index d076ef6bcd91e..712a836a17b8a 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java @@ -30,16 +30,12 @@ import java.io.IOException; import java.io.InputStream; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Set; import com.fasterxml.jackson.core.JsonFactory; -import static org.elasticsearch.client.sniff.SnifferTests.assertNodesEquals; import static org.hamcrest.Matchers.hasItem; import static 
org.hamcrest.Matchers.hasSize; import static org.junit.Assert.assertThat; @@ -57,14 +53,11 @@ private void checkFile(String file, Node... expected) throws IOException { try { HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON); List nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory()); - // Sort he list so the error messages are easier to read. - Collections.sort(nodes, new Comparator() { - @Override - public int compare(Node lhs, Node rhs) { - return lhs.getName().compareTo(rhs.getName()); - } - }); - assertNodesEquals(Arrays.asList(expected), nodes); + // Use these assertions because the error messages are nicer than hasItems. + assertThat(nodes, hasSize(expected.length)); + for (Node expectedNode : expected) { + assertThat(nodes, hasItem(expectedNode)); + } } finally { in.close(); } @@ -72,38 +65,38 @@ public int compare(Node lhs, Node rhs) { public void test2x() throws IOException { checkFile("2.0.0_nodes_http.json", - node(9207, "c1", "2.0.0", false, false, false), - node(9206, "c2", "2.0.0", false, false, false), + node(9200, "m1", "2.0.0", true, false, false), + node(9202, "m2", "2.0.0", true, true, false), + node(9201, "m3", "2.0.0", true, false, false), node(9205, "d1", "2.0.0", false, true, false), node(9204, "d2", "2.0.0", false, true, false), node(9203, "d3", "2.0.0", false, true, false), - node(9200, "m1", "2.0.0", true, false, false), - node(9202, "m2", "2.0.0", true, true, false), - node(9201, "m3", "2.0.0", true, false, false)); + node(9207, "c1", "2.0.0", false, false, false), + node(9206, "c2", "2.0.0", false, false, false)); } public void test5x() throws IOException { checkFile("5.0.0_nodes_http.json", - node(9206, "c1", "5.0.0", false, false, true), - node(9207, "c2", "5.0.0", false, false, true), + node(9200, "m1", "5.0.0", true, false, true), + node(9201, "m2", "5.0.0", true, true, true), + node(9202, "m3", "5.0.0", true, false, true), node(9203, "d1", "5.0.0", false, true, true), 
node(9204, "d2", "5.0.0", false, true, true), node(9205, "d3", "5.0.0", false, true, true), - node(9200, "m1", "5.0.0", true, false, true), - node(9201, "m2", "5.0.0", true, true, true), - node(9202, "m3", "5.0.0", true, false, true)); + node(9206, "c1", "5.0.0", false, false, true), + node(9207, "c2", "5.0.0", false, false, true)); } public void test6x() throws IOException { checkFile("6.0.0_nodes_http.json", - node(9206, "c1", "6.0.0", false, false, true), - node(9207, "c2", "6.0.0", false, false, true), + node(9200, "m1", "6.0.0", true, false, true), + node(9201, "m2", "6.0.0", true, true, true), + node(9202, "m3", "6.0.0", true, false, true), node(9203, "d1", "6.0.0", false, true, true), node(9204, "d2", "6.0.0", false, true, true), node(9205, "d3", "6.0.0", false, true, true), - node(9200, "m1", "6.0.0", true, false, true), - node(9201, "m2", "6.0.0", true, true, true), - node(9202, "m3", "6.0.0", true, false, true)); + node(9206, "c1", "6.0.0", false, false, true), + node(9207, "c2", "6.0.0", false, false, true)); } private Node node(int port, String name, String version, boolean master, boolean data, boolean ingest) { diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java index e8efdc7077089..260832ca90e17 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java @@ -53,7 +53,6 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.client.sniff.SnifferTests.assertNodesEquals; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.startsWith; @@ -120,7 +119,7 @@ public void testSniffNodes() throws IOException { if (sniffResponse.isFailure) { 
fail("sniffNodes should have failed"); } - assertNodesEquals(sniffResponse.result, sniffedNodes); + assertEquals(sniffResponse.result, sniffedNodes); } catch(ResponseException e) { Response response = e.getResponse(); if (sniffResponse.isFailure) { diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index 9c54a20af2175..00c5eb31d17e8 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -31,7 +31,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; -import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.concurrent.CancellationException; @@ -56,7 +55,7 @@ import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; @@ -112,11 +111,11 @@ public void shutdown() { fail("should have failed given that nodesSniffer says it threw an exception"); } else if (nodesSniffer.emptyList.get() > emptyList) { emptyList++; - assertNodesEquals(lastNodes, restClient.getNodes()); + assertEquals(lastNodes, restClient.getNodes()); } else { - assertNotSame(lastNodes, restClient.getNodes()); + assertNotEquals(lastNodes, restClient.getNodes()); List expectedNodes = CountingNodesSniffer.buildNodes(runs); - assertNodesEquals(expectedNodes, restClient.getNodes()); + assertEquals(expectedNodes, restClient.getNodes()); lastNodes = restClient.getNodes(); } } catch(IOException e) { @@ -654,23 +653,4 @@ public void testDefaultSchedulerShutdown() throws Exception { verify(executor, 
times(2)).awaitTermination(1000, TimeUnit.MILLISECONDS); verifyNoMoreInteractions(executor); } - - static final void assertNodesEquals(List expected, List actual) { - try { - assertEquals(expected.size(), actual.size()); - Iterator expectedItr = expected.iterator(); - Iterator actualItr = actual.iterator(); - while (expectedItr.hasNext()) { - Node expectedNode = expectedItr.next(); - Node actualNode = actualItr.next(); - assertEquals(expectedNode.getHost(), actualNode.getHost()); - assertEquals(expectedNode.getBoundHosts(), actualNode.getBoundHosts()); - assertEquals(expectedNode.getName(), actualNode.getName()); - assertEquals(expectedNode.getVersion(), actualNode.getVersion()); - assertEquals(expectedNode.getRoles(), actualNode.getRoles()); - } - } catch (AssertionError e) { - throw new AssertionError("nodes differ, expected: " + expected + " but was: " + actual, e); - } - } } From e29723ab662881532d64bb3d20b428dd6e171266 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 11 Jun 2018 12:59:35 -0400 Subject: [PATCH 22/23] Remove method we don't need any more --- .../java/org/elasticsearch/client/Node.java | 24 +------------------ .../org/elasticsearch/client/NodeTests.java | 18 -------------- 2 files changed, 1 insertion(+), 41 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java index 231ff0e813fc2..d66d0773016e6 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Node.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Node.java @@ -19,9 +19,6 @@ package org.elasticsearch.client; -import static java.util.Collections.unmodifiableSet; - -import java.util.HashSet; import java.util.Objects; import java.util.Set; @@ -79,25 +76,6 @@ public Node(HttpHost host) { this(host, null, null, null, null); } - /** - * Make a copy of this {@link Node} but replacing its - * {@link #getHost() host}. 
Use this when the sniffing implementation - * returns a {@link #getHost() host} that is not useful to the client. - */ - public Node withHost(HttpHost host) { - /* - * If the new host isn't in the bound hosts list we add it so the - * result looks sane. - */ - Set boundHosts = this.boundHosts; - if (false == boundHosts.contains(host)) { - boundHosts = new HashSet<>(boundHosts); - boundHosts.add(host); - boundHosts = unmodifiableSet(boundHosts); - } - return new Node(host, boundHosts, name, version, roles); - } - /** * Contact information for the host. */ @@ -164,8 +142,8 @@ public boolean equals(Object obj) { Node other = (Node) obj; return host.equals(other.host) && Objects.equals(boundHosts, other.boundHosts) - && Objects.equals(version, other.version) && Objects.equals(name, other.name) + && Objects.equals(version, other.version) && Objects.equals(roles, other.roles); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java index 989861df50293..c6d60415b88dc 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java @@ -31,24 +31,6 @@ import static org.junit.Assert.assertTrue; public class NodeTests extends RestClientTestCase { - public void testWithHost() { - HttpHost h1 = new HttpHost("1"); - HttpHost h2 = new HttpHost("2"); - HttpHost h3 = new HttpHost("3"); - - Node n = new Node(h1, new HashSet<>(Arrays.asList(h1, h2)), - randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), - new Roles(randomBoolean(), randomBoolean(), randomBoolean())); - - // Host is in the bound hosts list - assertEquals(h2, n.withHost(h2).getHost()); - assertEquals(n.getBoundHosts(), n.withHost(h2).getBoundHosts()); - - // Host not in the bound hosts list - assertEquals(h3, n.withHost(h3).getHost()); - assertEquals(new HashSet<>(Arrays.asList(h1, h2, h3)), n.withHost(h3).getBoundHosts()); - } - 
public void testToString() { assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString()); assertEquals("[host=http://1, roles=mdi]", new Node(new HttpHost("1"), From f030c9b590be74585db33001a5d7d0303130157f Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 11 Jun 2018 14:42:34 -0400 Subject: [PATCH 23/23] Words --- docs/java-rest/low-level/usage.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 2e6f264d3b00b..407947000de35 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -287,9 +287,9 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-sin <3> Customize the response consumer. `addHeader` is for headers that are required for authorization or to work with -a proxy that in front of Elasticsearch. There is no need to set the -`Content-Type` header because the client will automatically set that from the -`HttpEntity` attached to the request. +a proxy in front of Elasticsearch. There is no need to set the `Content-Type` +header because the client will automatically set that from the `HttpEntity` +attached to the request. You can set the `NodeSelector` which controls which nodes will receive requests. `NodeSelector.NOT_MASTER_ONLY` is a good choice.