Merge remote-tracking branch 'elastic/master' into system-jvm-options
* elastic/master:
  [Docs] Fix opType options in IndexRequest API example. (elastic#48290)
  Simplify Shard Snapshot Upload Code (elastic#48155)
  Mute ClassificationIT tests (elastic#48338)
  Reenable azure repository tests and remove some randomization in http servers (elastic#48283)
  Use an env var for the classpath of jar hell task (elastic#48240)
  Refactor FIPS BootstrapChecks to simple checks (elastic#47499)
  Add "format" to "range" queries resulted from optimizing a logical AND (elastic#48073)
  [DOCS][Transform] document limitation regarding rolling upgrade with 7.2, 7.3 (elastic#48118)
  Fail with a better error if there are no ingest nodes (elastic#48272)
  Fix executing enrich policies stats (elastic#48132)
  Use MultiFileTransfer in CCR remote recovery (elastic#44514)
  Make BytesReference an interface (elastic#48171)
  Also validate source index at put enrich policy time. (elastic#48254)
  Add 'javadoc' task to lifecycle check tasks (elastic#48214)
  Remove option to enable direct buffer pooling (elastic#47956)
  [DOCS] Add 'Selecting gateway and seed nodes' section to CCS docs (elastic#48297)
  Add Enrich Origin (elastic#48098)
  fix incorrect comparison (elastic#48208)
jasontedor committed Oct 22, 2019
2 parents eb67200 + ef240d8 commit 07bd9d8
Showing 90 changed files with 1,461 additions and 1,053 deletions.
@@ -37,7 +37,7 @@
* Since how to reap a given service is platform and service dependent, this tool
* operates on system commands to execute. It takes a single argument, a directory
* that will contain files with reaping commands. Each line in each file will be
* executed with {@link Runtime#getRuntime()#exec}.
* executed with {@link Runtime#exec(String)}.
*
* The main method will wait indefinitely on the parent process (Gradle) by
* reading from stdin. When Gradle shuts down, whether normally or abruptly, the
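
In sketch form, the contract this javadoc describes looks roughly like the following (an illustration, not the actual Reaper class; the argument handling is an assumption):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Rough sketch of the behavior described above, not the real Reaper:
// block on stdin until the parent Gradle process goes away, then execute
// every line of every file in the commands directory via Runtime#exec(String).
public class ReaperSketch {
    public static void main(final String[] args) throws IOException, InterruptedException {
        final Path commandsDir = Path.of(args[0]);
        System.in.read(); // returns -1 (EOF) once the parent process is gone
        try (var files = Files.list(commandsDir)) {
            for (final Path file : (Iterable<Path>) files::iterator) {
                for (final String command : Files.readAllLines(file)) {
                    Runtime.getRuntime().exec(command).waitFor(); // one reaping command per line
                }
            }
        }
    }
}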
@@ -676,6 +676,10 @@ class BuildPlugin implements Plugin<Project> {
*/
(javadoc.options as CoreJavadocOptions).addBooleanOption('html5', true)
}
// ensure javadoc task is run with 'check'
project.pluginManager.withPlugin('lifecycle-base') {
project.tasks.getByName(LifecycleBasePlugin.CHECK_TASK_NAME).dependsOn(project.tasks.withType(Javadoc))
}
configureJavadocJar(project)
}

@@ -889,7 +893,6 @@ class BuildPlugin implements Plugin<Project> {
test.systemProperty('io.netty.noUnsafe', 'true')
test.systemProperty('io.netty.noKeySetOptimization', 'true')
test.systemProperty('io.netty.recycler.maxCapacityPerThread', '0')
test.systemProperty('io.netty.allocator.numDirectArenas', '0')

test.testLogging { TestLoggingContainer logging ->
logging.showExceptions = true
@@ -42,7 +42,7 @@ public JarHellTask() {
@TaskAction
public void runJarHellCheck() {
LoggedExec.javaexec(getProject(), spec -> {
spec.classpath(getClasspath());
spec.environment("CLASSPATH", getClasspath().getAsPath());
spec.setMain("org.elasticsearch.bootstrap.JarHell");
});
}
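
Passing the classpath through the CLASSPATH environment variable instead of a -cp argument presumably keeps the forked command line short on platforms with argument-length limits (an assumption; the PR title only says an env var is used). A JVM launched without -cp/-classpath falls back to that variable, as this minimal probe shows (a hypothetical class, not part of the build):

import java.io.File;

// Hypothetical probe: a JVM started without -cp resolves its classpath from
// the CLASSPATH environment variable, which is what the javaexec spec above sets.
public class ClasspathProbe {
    public static void main(final String[] args) {
        final String fromEnv = System.getenv("CLASSPATH");
        final String effective = System.getProperty("java.class.path");
        System.out.println("CLASSPATH entries: "
            + (fromEnv == null ? 0 : fromEnv.split(File.pathSeparator, -1).length));
        System.out.println("effective classpath: " + effective);
    }
}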
@@ -133,7 +133,7 @@ public boolean equals(Object obj) {
&& Objects.equals(id, other.id)
&& docVersion == other.docVersion
&& found == other.found
&& tookInMillis == tookInMillis
&& tookInMillis == other.tookInMillis
&& Objects.equals(termVectorList, other.termVectorList);
}

@@ -20,7 +20,6 @@

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -60,7 +59,7 @@ private static void declareParserOptions(ConstructingObjectParser<?, ?> parser)
parser.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> {
XContentBuilder builder = XContentBuilder.builder(p.contentType().xContent());
builder.copyCurrentStructure(p);
return BytesArray.bytes(builder);
return BytesReference.bytes(builder);
}, QUERY_FIELD);
parser.declareStringArray(ConstructingObjectParser.constructorArg(), INDICES_FIELD);
parser.declareString(ConstructingObjectParser.constructorArg(), MATCH_FIELD_FIELD);
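
Both call sites above now serialize the copied structure with BytesReference.bytes, consistent with making BytesReference an interface (elastic#48171). A minimal standalone sketch of the pattern:

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

// Minimal sketch of the pattern used in the hunks above: build an XContent
// structure, then capture its bytes with BytesReference.bytes(builder)
// instead of the BytesArray helper these call sites dropped.
public class BytesReferenceSketch {
    public static void main(final String[] args) throws Exception {
        final XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject().startObject("match_all").endObject().endObject();
        final BytesReference query = BytesReference.bytes(builder);
        System.out.println(query.utf8ToString()); // {"match_all":{}}
    }
}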
@@ -21,7 +21,6 @@

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
@@ -86,7 +85,7 @@ public class IndexLifecycleExplainResponse implements ToXContentObject {
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> {
XContentBuilder builder = JsonXContent.contentBuilder();
builder.copyCurrentStructure(p);
return BytesArray.bytes(builder);
return BytesReference.bytes(builder);
}, STEP_INFO_FIELD);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, ""),
PHASE_EXECUTION_INFO);
@@ -58,6 +58,10 @@ public void cleanup() {

public void testPutPolicy() throws Exception {
RestHighLevelClient client = highLevelClient();
CreateIndexRequest createIndexRequest = new CreateIndexRequest("users")
.mapping(Map.of("properties", Map.of("email", Map.of("type", "keyword"))));
client.indices().create(createIndexRequest, RequestOptions.DEFAULT);

// tag::enrich-put-policy-request
PutPolicyRequest putPolicyRequest = new PutPolicyRequest(
"users-policy", "match", List.of("users"),
@@ -104,6 +108,10 @@ public void testDeletePolicy() throws Exception {
RestHighLevelClient client = highLevelClient();

{
CreateIndexRequest createIndexRequest = new CreateIndexRequest("users")
.mapping(Map.of("properties", Map.of("email", Map.of("type", "keyword"))));
client.indices().create(createIndexRequest, RequestOptions.DEFAULT);

// Add a policy, so that it can be deleted:
PutPolicyRequest putPolicyRequest = new PutPolicyRequest(
"users-policy", "match", List.of("users"),
@@ -155,6 +163,10 @@ public void onFailure(Exception e) {
public void testGetPolicy() throws Exception {
RestHighLevelClient client = highLevelClient();

CreateIndexRequest createIndexRequest = new CreateIndexRequest("users")
.mapping(Map.of("properties", Map.of("email", Map.of("type", "keyword"))));
client.indices().create(createIndexRequest, RequestOptions.DEFAULT);

PutPolicyRequest putPolicyRequest = new PutPolicyRequest(
"users-policy", "match", List.of("users"),
"email", List.of("address", "zip", "city", "state"));
@@ -19,7 +19,6 @@
package org.elasticsearch.client.enrich;

import org.elasticsearch.client.AbstractResponseTestCase;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -80,7 +79,7 @@ private static EnrichPolicy createRandomEnrichPolicy(XContentType xContentType){
try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
builder.startObject();
builder.endObject();
BytesReference querySource = BytesArray.bytes(builder);
BytesReference querySource = BytesReference.bytes(builder);
return new EnrichPolicy(
randomAlphaOfLength(4),
randomBoolean() ? new EnrichPolicy.QuerySource(querySource, xContentType) : null,
@@ -55,14 +55,6 @@ static List<String> choose(final List<String> userDefinedJvmOptions) throws Inte
final List<String> ergonomicChoices = new ArrayList<>();
final Map<String, Optional<String>> finalJvmOptions = finalJvmOptions(userDefinedJvmOptions);
final long heapSize = extractHeapSize(finalJvmOptions);
final Map<String, String> systemProperties = extractSystemProperties(userDefinedJvmOptions);
if (systemProperties.containsKey("io.netty.allocator.type") == false) {
if (heapSize <= 1 << 30) {
ergonomicChoices.add("-Dio.netty.allocator.type=unpooled");
} else {
ergonomicChoices.add("-Dio.netty.allocator.type=pooled");
}
}
final long maxDirectMemorySize = extractMaxDirectMemorySize(finalJvmOptions);
if (maxDirectMemorySize == 0) {
ergonomicChoices.add("-XX:MaxDirectMemorySize=" + heapSize / 2);
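
With the allocator-type ergonomic removed here (the allocator is now chosen in code; see the transport-netty4 changes below), the remaining choice sizes direct memory off the heap. A worked example, assuming a 2 GB heap and no user-supplied -XX:MaxDirectMemorySize:

// Worked example of the ergonomic above, under the assumption of -Xmx2g
// and no user-configured max direct memory (extracted as 0):
public class MaxDirectMemoryExample {
    public static void main(final String[] args) {
        final long heapSize = 2L << 30;     // 2 GB heap
        final long maxDirectMemorySize = 0; // not configured by the user
        if (maxDirectMemorySize == 0) {
            // half the heap: 1073741824 bytes, i.e. 1 GB
            System.out.println("-XX:MaxDirectMemorySize=" + heapSize / 2);
        }
    }
}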
@@ -117,20 +117,6 @@ public void testExtractNoSystemProperties() {
assertTrue(parsedSystemProperties.isEmpty());
}

public void testPooledMemoryChoiceOnSmallHeap() throws InterruptedException, IOException {
final String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G"));
assertThat(
JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap)),
hasItem("-Dio.netty.allocator.type=unpooled"));
}

public void testPooledMemoryChoiceOnNotSmallHeap() throws InterruptedException, IOException {
final String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G"));
assertThat(
JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap)),
hasItem("-Dio.netty.allocator.type=pooled"));
}

public void testMaxDirectMemorySizeChoice() throws InterruptedException, IOException {
final Map<String, String> heapMaxDirectMemorySize = Map.of(
"64M", Long.toString((64L << 20) / 2),
2 changes: 1 addition & 1 deletion docs/java-rest/high-level/document/index.asciidoc
@@ -85,7 +85,7 @@ include-tagged::{doc-tests-file}[{api}-request-version-type]
include-tagged::{doc-tests-file}[{api}-request-op-type]
--------------------------------------------------
<1> Operation type provided as a `DocWriteRequest.OpType` value
<2> Operation type provided as a `String`: can be `create` or `update` (default)
<2> Operation type provided as a `String`: can be `create` or `index` (default)
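
For reference, the two setter forms those callouts describe look like this on an IndexRequest (a condensed sketch, not the tagged doc snippet itself):

import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;

public class OpTypeSketch {
    public static void main(final String[] args) {
        final IndexRequest request = new IndexRequest("posts")
            .id("1")
            .source("user", "kimchy");
        request.opType(DocWriteRequest.OpType.CREATE); // enum form; fails if the doc already exists
        request.opType("create"); // string form: "create" or "index" (the default)
    }
}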

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
@@ -12,6 +12,13 @@ Deletes an existing enrich policy and its enrich index.
[source,console]
----
PUT /users
{
"mappings" : {
"properties" : {
"email" : { "type" : "keyword" }
}
}
}
PUT /_enrich/policy/my-policy
{
7 changes: 7 additions & 0 deletions docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc
@@ -12,6 +12,13 @@ Returns information about an enrich policy.
[source,console]
----
PUT /users
{
"mappings" : {
"properties" : {
"email" : { "type" : "keyword" }
}
}
}
PUT /_enrich/policy/my-policy
{
7 changes: 7 additions & 0 deletions docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc
@@ -12,6 +12,13 @@ Creates an enrich policy.
[source,console]
----
PUT /users
{
"mappings" : {
"properties" : {
"email" : { "type" : "keyword" }
}
}
}
----
////

30 changes: 25 additions & 5 deletions docs/reference/modules/cross-cluster-search.asciidoc
@@ -242,9 +242,31 @@ PUT _cluster/settings
If `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't
include matching documents from that cluster in the final results.

[float]
[discrete]
[[ccs-works]]
== How {ccs} works

include::./remote-clusters.asciidoc[tag=how-remote-clusters-work]

[discrete]
[[ccs-gateway-seed-nodes]]
=== Selecting gateway and seed nodes

Gateway and seed nodes need to be accessible from the local cluster via your
network.

By default, any master-ineligible node can act as a gateway node. If desired,
you can define the gateway nodes for a cluster by setting
`cluster.remote.node.attr.gateway` to `true`.

For {ccs}, we recommend you use gateway nodes that are capable of serving as
<<coordinating-node,coordinating nodes>> for search requests. If desired, the
seed nodes for a cluster can be a subset of these gateway nodes.
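
As a node-settings sketch (setting names taken from the paragraphs above; not verified against a live cluster), a dedicated gateway node might be configured like so:

import org.elasticsearch.common.settings.Settings;

public class GatewayNodeSettingsSketch {
    public static void main(final String[] args) {
        // Sketch only: a master-ineligible node opted in as a CCS gateway
        // node, using the attribute setting named in the text above.
        final Settings settings = Settings.builder()
            .put("node.master", false) // master-ineligible
            .put("cluster.remote.node.attr.gateway", true)
            .build();
        System.out.println(settings);
    }
}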

[discrete]
[[ccs-network-delays]]
=== How {ccs} handles network delays

Because {ccs} involves sending requests to remote clusters, any network delays
can impact search speed. To avoid slow searches, {ccs} offers two options for
handling network delays:
@@ -268,11 +290,9 @@ latency.
+
See <<ccs-unmin-roundtrips>> to learn how this option works.



[float]
[[ccs-min-roundtrips]]
=== Minimize network roundtrips
==== Minimize network roundtrips

Here's how {ccs} works when you minimize network roundtrips.

Expand All @@ -297,7 +317,7 @@ image:images/ccs/ccs-min-roundtrip-client-response.png[]

[float]
[[ccs-unmin-roundtrips]]
=== Don't minimize network roundtrips
==== Don't minimize network roundtrips

Here's how {ccs} works when you don't minimize network roundtrips.

6 changes: 5 additions & 1 deletion docs/reference/modules/remote-clusters.asciidoc
@@ -13,12 +13,16 @@ connections to a remote cluster. This functionality is used in
<<modules-cross-cluster-search,{ccs}>>.
endif::[]

// tag::how-remote-clusters-work[]
Remote cluster connections work by configuring a remote cluster and connecting
only to a limited number of nodes in that remote cluster. Each remote cluster
is referenced by a name and a list of seed nodes. When a remote cluster is
registered, its cluster state is retrieved from one of the seed nodes and up
to three _gateway nodes_ are selected to be connected to as part of remote
cluster requests. All the communication required between different clusters
cluster requests.
// end::how-remote-clusters-work[]

All the communication required between different clusters
goes through the <<modules-transport,transport layer>>. Remote cluster
connections consist of uni-directional connections from the coordinating
node to the selected remote _gateway nodes_ only.
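
In settings form, the registration described above amounts to a named cluster with a list of seed nodes (a sketch; the alias and address are placeholders):

import org.elasticsearch.common.settings.Settings;

public class RemoteClusterSeedsSketch {
    public static void main(final String[] args) {
        // Sketch of registering a remote cluster by name with its seed
        // nodes; "cluster_one" and the address are illustrative.
        final Settings settings = Settings.builder()
            .putList("cluster.remote.cluster_one.seeds", "127.0.0.1:9300")
            .build();
        System.out.println(settings);
    }
}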
14 changes: 11 additions & 3 deletions docs/reference/transform/limitations.asciidoc
@@ -33,6 +33,14 @@ upgrade from 7.2 to a newer version, and {transforms} have been created in 7.2,
the {transforms} UI (earlier {dataframe} UI) will not work. Please wait until all
nodes have been upgraded to the newer version before using the {transforms} UI.

[float]
[[transform-rolling-upgrade-limitation]]
==== {transforms-cap} reassignment suspended during a rolling upgrade from 7.2 or 7.3

If your cluster contains mixed version nodes, for example during a rolling
upgrade from 7.2 or 7.3 to a newer version, {transforms} whose nodes are stopped will
not be reassigned until the upgrade is complete. After the upgrade is done, {transforms}
resume automatically; no action is required.

[float]
[[transform-datatype-limitations]]
@@ -181,9 +189,9 @@ for the {transform} checkpoint to complete.

[float]
[[transform-scheduling-limitations]]
==== {cdataframe-cap} scheduling limitations
==== {ctransform-cap} scheduling limitations

A {cdataframe} periodically checks for changes to source data. The functionality
A {ctransform} periodically checks for changes to source data. The functionality
of the scheduler is currently limited to a basic periodic timer which can be
within the `frequency` range from 1s to 1h. The default is 1m. This is designed
to run little and often. When choosing a `frequency` for this timer consider
Expand All @@ -206,7 +214,7 @@ When using the API to delete a failed {transform}, first stop it using

[float]
[[transform-availability-limitations]]
==== {cdataframes-cap} may give incorrect results if documents are not yet available to search
==== {ctransforms-cap} may give incorrect results if documents are not yet available to search

After a document is indexed, there is a very small delay until it is available
to search.
4 changes: 2 additions & 2 deletions modules/transport-netty4/build.gradle
@@ -66,7 +66,7 @@ integTestRunner {
TaskProvider<Test> pooledTest = tasks.register("pooledTest", Test) {
include '**/*Tests.class'
systemProperty 'es.set.netty.runtime.available.processors', 'false'
systemProperty 'io.netty.allocator.type', 'pooled'
systemProperty 'es.use_unpooled_allocator', 'false'
}
// TODO: we can't use task avoidance here because RestIntegTestTask does the testcluster creation
RestIntegTestTask pooledIntegTest = tasks.create("pooledIntegTest", RestIntegTestTask) {
@@ -75,7 +75,7 @@ RestIntegTestTask pooledIntegTest = tasks.create("pooledIntegTest", RestIntegTes
}
}
testClusters.pooledIntegTest {
systemProperty 'io.netty.allocator.type', 'pooled'
systemProperty 'es.use_unpooled_allocator', 'false'
}
check.dependsOn(pooledTest, pooledIntegTest)

@@ -20,7 +20,6 @@
package org.elasticsearch.http.netty4;

import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandler;
@@ -32,7 +31,6 @@
import io.netty.channel.RecvByteBufAllocator;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioChannelOption;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.http.HttpContentCompressor;
import io.netty.handler.codec.http.HttpContentDecompressor;
@@ -63,7 +61,7 @@
import org.elasticsearch.http.HttpServerChannel;
import org.elasticsearch.http.netty4.cors.Netty4CorsHandler;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.CopyBytesServerSocketChannel;
import org.elasticsearch.transport.NettyAllocator;
import org.elasticsearch.transport.netty4.Netty4Utils;

import java.net.InetSocketAddress;
@@ -186,14 +184,12 @@ protected void doStart() {
serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings,
HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));

// If direct buffer pooling is disabled, use the CopyBytesServerSocketChannel which will create child
// channels of type CopyBytesSocketChannel. CopyBytesSocketChannel pool a single direct buffer
// per-event-loop thread to be used for IO operations.
if (ByteBufAllocator.DEFAULT.isDirectBufferPooled()) {
serverBootstrap.channel(NioServerSocketChannel.class);
} else {
serverBootstrap.channel(CopyBytesServerSocketChannel.class);
}
// NettyAllocator will return the channel type designed to work with the configuredAllocator
serverBootstrap.channel(NettyAllocator.getServerChannelType());

// Set the allocators for both the server channel and the child channels created
serverBootstrap.option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator());
serverBootstrap.childOption(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator());

serverBootstrap.childHandler(configureServerChannelHandler());
serverBootstrap.handler(new ServerChannelExceptionHandler(this));
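
Condensed, the bootstrap pattern in this hunk pairs the channel type with a matching allocator, both supplied by NettyAllocator (a sketch reusing only the calls shown above):

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import org.elasticsearch.transport.NettyAllocator;

public class AllocatorBootstrapSketch {
    public static void main(final String[] args) {
        // Channel type and allocator both come from NettyAllocator, so the
        // copying-channel fallback and the allocator choice stay in sync.
        final ServerBootstrap bootstrap = new ServerBootstrap()
            .group(new NioEventLoopGroup(1))
            .channel(NettyAllocator.getServerChannelType())
            .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator())
            .childOption(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator());
        System.out.println(bootstrap);
    }
}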