Merge branch 'master' into check-for-close
* master:
  Fix handling of paths containing parentheses
  Allow only a fixed-size receive predictor (elastic#26165)
  Add Homebrew instructions to getting started
  ingest: Fix bug that prevents date_index_name processor from accepting timestamps specified as a json number
  Scripting: Fix expressions to temporarily support filter scripts (elastic#26824)
  Docs: Add note to contributing docs warning against tool based refactoring (elastic#26936)
  Fix thread context handling of headers overriding (elastic#26068)
  SearchWhileCreatingIndexIT: remove usage of _only_nodes
jasontedor committed Oct 10, 2017
2 parents 36c2753 + c03f0c8 commit 7d2e7f4
Showing 12 changed files with 91 additions and 54 deletions.
5 changes: 5 additions & 0 deletions CONTRIBUTING.md
@@ -38,6 +38,11 @@ If you have a bugfix or new feature that you would like to contribute to Elastic

We enjoy working with contributors to get their code accepted. There are many approaches to fixing a problem and it is important to find the best approach before writing too much code.

+ Note that it is unlikely the project will merge refactors for the sake of refactoring. These
+ types of pull requests have a high cost to maintainers in reviewing and testing with little
+ to no tangible benefit. This especially includes changes generated by tools. For example,
+ converting all generic interface instances to use the diamond operator.

The process for contributing to any of the [Elastic repositories](https://github.com/elastic/) is similar. Details for individual projects can be found below.

### Fork and clone the repository
@@ -407,11 +407,10 @@ private ThreadContextStruct putHeaders(Map<String, String> headers) {
if (headers.isEmpty()) {
return this;
} else {
- final Map<String, String> newHeaders = new HashMap<>();
+ final Map<String, String> newHeaders = new HashMap<>(this.requestHeaders);
for (Map.Entry<String, String> entry : headers.entrySet()) {
putSingleHeader(entry.getKey(), entry.getValue(), newHeaders);
}
- newHeaders.putAll(this.requestHeaders);
return new ThreadContextStruct(newHeaders, responseHeaders, transientHeaders, isSystemContext);
}
}
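This reordering is the substance of elastic#26068: by seeding the new map with the existing request headers, putSingleHeader can detect an attempted override, whereas the old code's trailing putAll silently replaced the freshly put values. A minimal standalone sketch of the before/after behavior, using a plain map and a simplified stand-in for putSingleHeader (the class and method here are illustrative, not the real ThreadContext internals):

```java
import java.util.HashMap;
import java.util.Map;

public class PutHeadersDemo {
    // Simplified analogue of ThreadContext.putSingleHeader: reject duplicate keys.
    static void putSingleHeader(String key, String value, Map<String, String> map) {
        if (map.putIfAbsent(key, value) != null) {
            throw new IllegalArgumentException("value for key [" + key + "] already present");
        }
    }

    public static void main(String[] args) {
        Map<String, String> requestHeaders = new HashMap<>();
        requestHeaders.put("foo", "bar");

        // Old order: start empty, put the incoming headers, then copy the existing
        // ones on top -- "foo" -> "boom" is silently overwritten back to "bar".
        Map<String, String> oldStyle = new HashMap<>();
        putSingleHeader("foo", "boom", oldStyle); // no conflict visible yet
        oldStyle.putAll(requestHeaders);
        System.out.println(oldStyle.get("foo")); // prints "bar" -- the override was lost

        // New order: seed with the existing headers first, so the duplicate is caught.
        Map<String, String> newStyle = new HashMap<>(requestHeaders);
        try {
            putSingleHeader("foo", "boom", newStyle);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // value for key [foo] already present
        }
    }
}
```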
@@ -29,7 +29,6 @@
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasSize;
@@ -215,8 +214,8 @@ public void testResponseHeaders() {
public void testCopyHeaders() {
Settings build = Settings.builder().put("request.headers.default", "1").build();
ThreadContext threadContext = new ThreadContext(build);
- threadContext.copyHeaders(Collections.<String,String>emptyMap().entrySet());
- threadContext.copyHeaders(Collections.<String,String>singletonMap("foo", "bar").entrySet());
+ threadContext.copyHeaders(Collections.<String, String>emptyMap().entrySet());
+ threadContext.copyHeaders(Collections.<String, String>singletonMap("foo", "bar").entrySet());
assertEquals("bar", threadContext.getHeader("foo"));
}

@@ -443,7 +442,7 @@ public void onAfter() {
assertEquals("bar", threadContext.getHeader("foo"));
assertEquals("bar_transient", threadContext.getTransient("foo"));
assertNotNull(threadContext.getTransient("failure"));
assertEquals("exception from doRun", ((RuntimeException)threadContext.getTransient("failure")).getMessage());
assertEquals("exception from doRun", ((RuntimeException) threadContext.getTransient("failure")).getMessage());
assertFalse(threadContext.isDefaultContext());
threadContext.putTransient("after", "after");
}
@@ -604,7 +603,7 @@ protected void doRun() throws Exception {
public void testMarkAsSystemContext() throws IOException {
try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) {
assertFalse(threadContext.isSystemContext());
- try(ThreadContext.StoredContext context = threadContext.stashContext()){
+ try (ThreadContext.StoredContext context = threadContext.stashContext()) {
assertFalse(threadContext.isSystemContext());
threadContext.markAsSystemContext();
assertTrue(threadContext.isSystemContext());
@@ -613,6 +612,17 @@ public void testMarkAsSystemContext() throws IOException {
}
}

+ public void testPutHeaders() {
+ Settings build = Settings.builder().put("request.headers.default", "1").build();
+ ThreadContext threadContext = new ThreadContext(build);
+ threadContext.putHeader(Collections.<String, String>emptyMap());
+ threadContext.putHeader(Collections.<String, String>singletonMap("foo", "bar"));
+ assertEquals("bar", threadContext.getHeader("foo"));
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
+ threadContext.putHeader(Collections.<String, String>singletonMap("foo", "boom")));
+ assertEquals("value for key [foo] already present", e.getMessage());
+ }

/**
* Sometimes wraps a Runnable in an AbstractRunnable.
*/
@@ -23,7 +23,6 @@
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
- import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
@@ -74,21 +73,13 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas)

logger.info("using preference {}", preference);
// we want to make sure that while recovery happens, and a replica gets recovered, its properly refreshed
- ClusterHealthStatus status = client().admin().cluster().prepareHealth("test").get().getStatus();
+ ClusterHealthStatus status = client().admin().cluster().prepareHealth("test").get().getStatus();;
while (status != ClusterHealthStatus.GREEN) {
- // first, verify that search on the primary search works
- for (IndexShardRoutingTable shardRoutingTable : clusterService().state().routingTable().index("test")) {
- String primaryNode = shardRoutingTable.primaryShard().currentNodeId();
- SearchResponse searchResponse = client().prepareSearch("test")
- .setPreference("_only_nodes:" + primaryNode)
- .setQuery(QueryBuilders.termQuery("field", "test"))
- .execute().actionGet();
- assertHitCount(searchResponse, 1);
- break;
- }
+ // first, verify that normal search works
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+ assertHitCount(searchResponse, 1);
Client client = client();
- SearchResponse searchResponse = client.prepareSearch("test").setPreference(preference + Integer.toString(counter++)).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+ searchResponse = client.prepareSearch("test").setPreference(preference + Integer.toString(counter++)).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
if (searchResponse.getHits().getTotalHits() != 1) {
refresh();
SearchResponse searchResponseAfterRefresh = client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
@@ -102,13 +93,6 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas)
status = client().admin().cluster().prepareHealth("test").get().getStatus();
internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1);
}

- for (String node : internalCluster().nodesInclude("test")) {
- SearchResponse searchResponse = client().prepareSearch("test")
- .setPreference("_prefer_nodes:" + node)
- .setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
- assertHitCount(searchResponse, 1);
- }
cluster().wipeIndices("test");
}
}
6 changes: 3 additions & 3 deletions distribution/src/main/resources/bin/elasticsearch-service.bat
@@ -163,15 +163,15 @@ for %%a in ("%ES_JAVA_OPTS:;=","%") do (
@endlocal & set JVM_MS=%JVM_MS% & set JVM_MX=%JVM_MX% & set JVM_SS=%JVM_SS%

if "%JVM_MS%" == "" (
- echo minimum heap size not set; configure using -Xms via %ES_JVM_OPTIONS% or ES_JAVA_OPTS
+ echo minimum heap size not set; configure using -Xms via "%ES_JVM_OPTIONS%" or ES_JAVA_OPTS
goto:eof
)
if "%JVM_MX%" == "" (
- echo maximum heap size not set; configure using -Xmx via %ES_JVM_OPTIONS% or ES_JAVA_OPTS
+ echo maximum heap size not set; configure using -Xmx via "%ES_JVM_OPTIONS%" or ES_JAVA_OPTS
goto:eof
)
if "%JVM_SS%" == "" (
- echo thread stack size not set; configure using -Xss via %ES_JVM_OPTIONS% or ES_JAVA_OPTS
+ echo thread stack size not set; configure using -Xss via "%ES_JVM_OPTIONS%" or ES_JAVA_OPTS
goto:eof
)
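The added quotes tie in with the "Fix handling of paths containing parentheses" commit in this merge: when %ES_JVM_OPTIONS% expands to a path such as C:\Program Files (x86)\..., an unquoted closing parenthesis would otherwise terminate the surrounding if (...) block early and break the script.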

10 changes: 10 additions & 0 deletions docs/reference/getting-started.asciidoc
@@ -148,6 +148,16 @@ And now we are ready to start our node and single cluster:
./elasticsearch
--------------------------------------------------

+ [float]
+ === Installation with Homebrew
+
+ On macOS, Elasticsearch can also be installed via https://brew.sh[Homebrew]:
+
+ ["source","sh"]
+ --------------------------------------------------
+ brew install elasticsearch
+ --------------------------------------------------

[float]
=== Installation example with MSI Windows Installer

@@ -25,6 +25,7 @@
import java.util.List;
import java.util.Locale;
import java.util.Map;
+ import java.util.Objects;
import java.util.function.Function;

import org.elasticsearch.ExceptionsHelper;
@@ -61,7 +62,8 @@ public final class DateIndexNameProcessor extends AbstractProcessor {

@Override
public void execute(IngestDocument ingestDocument) throws Exception {
- String date = ingestDocument.getFieldValue(field, String.class);
+ // Date can be specified as a string or long:
+ String date = Objects.toString(ingestDocument.getFieldValue(field, Object.class));

DateTime dateTime = null;
Exception lastException = null;
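This is the date_index_name fix from the merge list: a timestamp supplied as a JSON number is deserialized as a Long rather than a String, so the field is now read as a plain Object and normalized with Objects.toString. A small standalone illustration of that normalization (a demo class, not the processor itself):

```java
import java.util.Objects;

public class DateFieldNormalizationDemo {
    public static void main(String[] args) {
        Object fromString = "1000500";   // timestamp sent as a JSON string
        Object fromNumber = 1000500L;    // the same timestamp sent as a JSON number

        // Objects.toString renders both to identical parseable text, so the
        // downstream date parsing logic no longer cares which form was ingested.
        System.out.println(Objects.toString(fromString)); // 1000500
        System.out.println(Objects.toString(fromNumber)); // 1000500
    }
}
```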
@@ -62,6 +62,11 @@ public void testUnixMs()throws Exception {
Collections.singletonMap("_field", "1000500"));
dateProcessor.execute(document);
assertThat(document.getSourceAndMetadata().get("_index"), equalTo("<events-{19700101||/m{yyyyMMdd|UTC}}>"));

+ document = new IngestDocument("_index", "_type", "_id", null, null,
+ Collections.singletonMap("_field", 1000500L));
+ dateProcessor.execute(document);
+ assertThat(document.getSourceAndMetadata().get("_index"), equalTo("<events-{19700101||/m{yyyyMMdd|UTC}}>"));
}

public void testUnix()throws Exception {
@@ -38,6 +38,7 @@
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.script.ClassPermission;
import org.elasticsearch.script.ExecutableScript;
+ import org.elasticsearch.script.FilterScript;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.script.ScriptException;
@@ -107,6 +108,9 @@ protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
} else if (context.instanceClazz.equals(ExecutableScript.class)) {
ExecutableScript.Factory factory = (p) -> new ExpressionExecutableScript(expr, p);
return context.factoryClazz.cast(factory);
+ } else if (context.instanceClazz.equals(FilterScript.class)) {
+ FilterScript.Factory factory = (p, lookup) -> newFilterScript(expr, lookup, p);
+ return context.factoryClazz.cast(factory);
}
throw new IllegalArgumentException("expression engine does not know how to handle script context [" + context.name + "]");
}
@@ -236,6 +240,27 @@ private SearchScript.LeafFactory newSearchScript(Expression expr, SearchLookup lookup, @Nullable Map<String, Object> vars) {
return new ExpressionSearchScript(expr, bindings, specialValue, needsScores);
}

+ /**
+ * This is a hack for filter scripts, which must return booleans instead of doubles as expressions do.
+ * See https://github.com/elastic/elasticsearch/issues/26429.
+ */
+ private FilterScript.LeafFactory newFilterScript(Expression expr, SearchLookup lookup, @Nullable Map<String, Object> vars) {
+ SearchScript.LeafFactory searchLeafFactory = newSearchScript(expr, lookup, vars);
+ return ctx -> {
+ SearchScript script = searchLeafFactory.newInstance(ctx);
+ return new FilterScript(vars, lookup, ctx) {
+ @Override
+ public boolean execute() {
+ return script.runAsDouble() != 0.0;
+ }
+ @Override
+ public void setDocument(int docid) {
+ script.setDocument(docid);
+ }
+ };
+ };
+ }

/**
* converts a ParseException at compile-time or link-time to a ScriptException
*/
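The wrapper above adapts a double-producing expression to the boolean contract a filter requires, counting any nonzero value as a match. A stripped-down sketch of that adaptation, using plain functional interfaces in place of the SearchScript/FilterScript machinery:

```java
import java.util.function.BooleanSupplier;
import java.util.function.DoubleSupplier;

public class FilterAdapterDemo {
    // Adapt a double-valued script to a boolean filter: nonzero means "matches".
    static BooleanSupplier asFilter(DoubleSupplier expression) {
        return () -> expression.getAsDouble() != 0.0;
    }

    public static void main(String[] args) {
        System.out.println(asFilter(() -> 1.0).getAsBoolean()); // true  -> document kept
        System.out.println(asFilter(() -> 0.0).getAsBoolean()); // false -> document filtered out
    }
}
```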
@@ -700,4 +700,19 @@ public void testBoolean() throws Exception {
assertEquals(2.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D);
assertEquals(2.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D);
}

+ public void testFilterScript() throws Exception {
+ createIndex("test");
+ ensureGreen("test");
+ indexRandom(true,
+ client().prepareIndex("test", "doc", "1").setSource("foo", 1.0),
+ client().prepareIndex("test", "doc", "2").setSource("foo", 0.0));
+ SearchRequestBuilder builder = buildRequest("doc['foo'].value");
+ Script script = new Script(ScriptType.INLINE, "expression", "doc['foo'].value", Collections.emptyMap());
+ builder.setQuery(QueryBuilders.boolQuery().filter(QueryBuilders.scriptQuery(script)));
+ SearchResponse rsp = builder.get();
+ assertSearchResponse(rsp);
+ assertEquals(1, rsp.getHits().getTotalHits());
+ assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D);
+ }
}
@@ -22,7 +22,6 @@
import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet;
import io.netty.bootstrap.ServerBootstrap;
- import io.netty.channel.AdaptiveRecvByteBufAllocator;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandler;
@@ -88,7 +87,6 @@
import java.util.regex.Pattern;

import static org.elasticsearch.common.settings.Setting.boolSetting;
- import static org.elasticsearch.common.settings.Setting.byteSizeSetting;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
@@ -132,10 +130,6 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport {

public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE =
Setting.byteSizeSetting("http.netty.receive_predictor_size", new ByteSizeValue(64, ByteSizeUnit.KB), Property.NodeScope);
- public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN =
- byteSizeSetting("http.netty.receive_predictor_min", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope);
- public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX =
- byteSizeSetting("http.netty.receive_predictor_max", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope);


protected final NetworkService networkService;
@@ -227,17 +221,8 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic
this.tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings);
this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);

- // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
- ByteSizeValue receivePredictorMin = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN.get(settings);
- ByteSizeValue receivePredictorMax = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX.get(settings);
- if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) {
- recvByteBufAllocator = new FixedRecvByteBufAllocator(Math.toIntExact(receivePredictorMax.getBytes()));
- } else {
- recvByteBufAllocator = new AdaptiveRecvByteBufAllocator(
- Math.toIntExact(receivePredictorMin.getBytes()),
- Math.toIntExact(receivePredictorMin.getBytes()),
- Math.toIntExact(receivePredictorMax.getBytes()));
- }
+ ByteSizeValue receivePredictor = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings);
+ recvByteBufAllocator = new FixedRecvByteBufAllocator(receivePredictor.bytesAsInt());

this.compression = SETTING_HTTP_COMPRESSION.get(settings);
this.compressionLevel = SETTING_HTTP_COMPRESSION_LEVEL.get(settings);
@@ -253,9 +238,8 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic
this.maxContentLength = maxContentLength;

logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " +
"receive_predictor[{}->{}], pipelining[{}], pipelining_max_events[{}]",
maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength,
receivePredictorMin, receivePredictorMax, pipelining, pipeliningMaxEvents);
"receive_predictor[{}], pipelining[{}], pipelining_max_events[{}]",
maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictor, pipelining, pipeliningMaxEvents);
}

public Settings settings() {
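With the separate min/max settings removed, the transport now always pins Netty's per-read receive buffer to the single http.netty.receive_predictor_size value. A minimal sketch of what that wiring amounts to (a standalone Netty 4 fragment for illustration, not the transport's actual channel setup):

```java
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelOption;
import io.netty.channel.FixedRecvByteBufAllocator;

public class FixedReceivePredictorDemo {
    static void applyReceivePredictor(ServerBootstrap bootstrap, int receivePredictorBytes) {
        // Every channel read now uses a buffer of exactly receivePredictorBytes,
        // replacing the old adaptive min/max range with one fixed size.
        bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                new FixedRecvByteBufAllocator(receivePredictorBytes));
    }
}
```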
@@ -56,8 +56,6 @@ public List<Setting<?>> getSettings() {
Netty4HttpServerTransport.SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
Netty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT,
Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE,
- Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN,
- Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX,
Netty4Transport.WORKER_COUNT,
Netty4Transport.NETTY_RECEIVE_PREDICTOR_SIZE,
Netty4Transport.NETTY_RECEIVE_PREDICTOR_MIN,
