Skip to content

Commit

Permalink
Merge branch 'main' into threadpool-merge-scheduler-sort-all-merges
Browse files Browse the repository at this point in the history
  • Loading branch information
albertzaharovits committed Jan 24, 2025
2 parents 4099ac5 + be7635e commit 5554bc2
Show file tree
Hide file tree
Showing 270 changed files with 9,527 additions and 3,112 deletions.
1 change: 1 addition & 0 deletions .gitattributes
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/*.interp li
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer*.java linguist-generated=true
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser*.java linguist-generated=true
x-pack/plugin/esql/src/main/generated/** linguist-generated=true
x-pack/plugin/esql/src/main/generated-src/** linguist-generated=true

# ESQL functions docs are autogenerated. More information at `docs/reference/esql/functions/README.md`
docs/reference/esql/functions/*/** linguist-generated=true
Expand Down
5 changes: 4 additions & 1 deletion benchmarks/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -126,9 +126,12 @@ exit
Grab the async profiler from https://github.com/jvm-profiling-tools/async-profiler
and run `prof async` like so:
```
gradlew -p benchmarks/ run --args 'LongKeyedBucketOrdsBenchmark.multiBucket -prof "async:libPath=/home/nik9000/Downloads/tmp/async-profiler-1.8.3-linux-x64/build/libasyncProfiler.so;dir=/tmp/prof;output=flamegraph"'
gradlew -p benchmarks/ run --args 'LongKeyedBucketOrdsBenchmark.multiBucket -prof "async:libPath=/home/nik9000/Downloads/async-profiler-3.0-29ee888-linux-x64/lib/libasyncProfiler.so;dir=/tmp/prof;output=flamegraph"'
```

Note: As of January 2025 the latest release of async profiler doesn't work
with our JDK but the nightly is fine.

If you are on Mac, this'll warn you that you downloaded the shared library from
the internet. You'll need to go to settings and allow it to run.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc;
import org.elasticsearch.xpack.esql.expression.function.scalar.math.Abs;
import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin;
import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce;
import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike;
import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add;
import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals;
Expand Down Expand Up @@ -96,6 +97,9 @@ public class EvalBenchmark {
"add_double",
"case_1_eager",
"case_1_lazy",
"coalesce_2_noop",
"coalesce_2_eager",
"coalesce_2_lazy",
"date_trunc",
"equal_to_const",
"long_equal_to_long",
Expand Down Expand Up @@ -142,8 +146,34 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) {
lhs = new Add(Source.EMPTY, lhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
rhs = new Add(Source.EMPTY, rhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
}
yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Case(Source.EMPTY, condition, List.of(lhs, rhs)), layout(f1, f2))
.get(driverContext);
EvalOperator.ExpressionEvaluator evaluator = EvalMapper.toEvaluator(
FOLD_CONTEXT,
new Case(Source.EMPTY, condition, List.of(lhs, rhs)),
layout(f1, f2)
).get(driverContext);
String desc = operation.endsWith("lazy") ? "CaseLazyEvaluator" : "CaseEagerEvaluator";
if (evaluator.toString().contains(desc) == false) {
throw new IllegalArgumentException("Evaluator was [" + evaluator + "] but expected one containing [" + desc + "]");
}
yield evaluator;
}
case "coalesce_2_noop", "coalesce_2_eager", "coalesce_2_lazy" -> {
FieldAttribute f1 = longField();
FieldAttribute f2 = longField();
Expression lhs = f1;
if (operation.endsWith("lazy")) {
lhs = new Add(Source.EMPTY, lhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
}
EvalOperator.ExpressionEvaluator evaluator = EvalMapper.toEvaluator(
FOLD_CONTEXT,
new Coalesce(Source.EMPTY, lhs, List.of(f2)),
layout(f1, f2)
).get(driverContext);
String desc = operation.endsWith("lazy") ? "CoalesceLazyEvaluator" : "CoalesceEagerEvaluator";
if (evaluator.toString().contains(desc) == false) {
throw new IllegalArgumentException("Evaluator was [" + evaluator + "] but expected one containing [" + desc + "]");
}
yield evaluator;
}
case "date_trunc" -> {
FieldAttribute timestamp = new FieldAttribute(
Expand Down Expand Up @@ -260,6 +290,38 @@ private static void checkExpected(String operation, Page actual) {
}
}
}
case "coalesce_2_noop" -> {
LongVector f1 = actual.<LongBlock>getBlock(0).asVector();
LongVector result = actual.<LongBlock>getBlock(2).asVector();
for (int i = 0; i < BLOCK_LENGTH; i++) {
long expected = f1.getLong(i);
if (result.getLong(i) != expected) {
throw new AssertionError("[" + operation + "] expected [" + expected + "] but was [" + result.getLong(i) + "]");
}
}
}
case "coalesce_2_eager" -> {
LongBlock f1 = actual.<LongBlock>getBlock(0);
LongVector f2 = actual.<LongBlock>getBlock(1).asVector();
LongVector result = actual.<LongBlock>getBlock(2).asVector();
for (int i = 0; i < BLOCK_LENGTH; i++) {
long expected = i % 5 == 0 ? f2.getLong(i) : f1.getLong(f1.getFirstValueIndex(i));
if (result.getLong(i) != expected) {
throw new AssertionError("[" + operation + "] expected [" + expected + "] but was [" + result.getLong(i) + "]");
}
}
}
case "coalesce_2_lazy" -> {
LongBlock f1 = actual.<LongBlock>getBlock(0);
LongVector f2 = actual.<LongBlock>getBlock(1).asVector();
LongVector result = actual.<LongBlock>getBlock(2).asVector();
for (int i = 0; i < BLOCK_LENGTH; i++) {
long expected = i % 5 == 0 ? f2.getLong(i) : f1.getLong(f1.getFirstValueIndex(i)) + 1;
if (result.getLong(i) != expected) {
throw new AssertionError("[" + operation + "] expected [" + expected + "] but was [" + result.getLong(i) + "]");
}
}
}
case "date_trunc" -> {
LongVector v = actual.<LongBlock>getBlock(1).asVector();
long oneDay = TimeValue.timeValueHours(24).millis();
Expand Down Expand Up @@ -304,7 +366,7 @@ private static void checkExpected(String operation, Page actual) {
}
}
}
default -> throw new UnsupportedOperationException();
default -> throw new UnsupportedOperationException(operation);
}
}

Expand All @@ -324,7 +386,7 @@ private static Page page(String operation) {
}
yield new Page(builder.build());
}
case "case_1_eager", "case_1_lazy" -> {
case "case_1_eager", "case_1_lazy", "coalesce_2_noop" -> {
var f1 = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
var f2 = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
for (int i = 0; i < BLOCK_LENGTH; i++) {
Expand All @@ -333,6 +395,19 @@ private static Page page(String operation) {
}
yield new Page(f1.build(), f2.build());
}
case "coalesce_2_eager", "coalesce_2_lazy" -> {
var f1 = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
var f2 = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
for (int i = 0; i < BLOCK_LENGTH; i++) {
if (i % 5 == 0) {
f1.appendNull();
} else {
f1.appendLong(i);
}
f2.appendLong(-i);
}
yield new Page(f1.build(), f2.build());
}
case "long_equal_to_long" -> {
var lhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
var rhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ public class ScriptScoreBenchmark {
private final PluginsService pluginsService = new PluginsService(
Settings.EMPTY,
null,
PluginsLoader.createPluginsLoader(Set.of(), PluginsLoader.loadPluginsBundles(Path.of(System.getProperty("plugins.dir"))))
PluginsLoader.createPluginsLoader(Set.of(), PluginsLoader.loadPluginsBundles(Path.of(System.getProperty("plugins.dir"))), Map.of())
);
private final ScriptModule scriptModule = new ScriptModule(Settings.EMPTY, pluginsService.filterPlugins(ScriptPlugin.class).toList());

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;
Expand Down Expand Up @@ -69,7 +70,7 @@ static List<String> systemJvmOptions(Settings nodeSettings, final Map<String, St
// Pass through distribution type
"-Des.distribution.type=" + distroType
),
maybeEnableNativeAccess(),
maybeEnableNativeAccess(useEntitlements),
maybeOverrideDockerCgroup(distroType),
maybeSetActiveProcessorCount(nodeSettings),
maybeSetReplayFile(distroType, isHotspot),
Expand Down Expand Up @@ -124,11 +125,18 @@ private static Stream<String> maybeSetActiveProcessorCount(Settings nodeSettings
return Stream.empty();
}

private static Stream<String> maybeEnableNativeAccess() {
private static Stream<String> maybeEnableNativeAccess(boolean useEntitlements) {
var enableNativeAccessOptions = new ArrayList<String>();
if (Runtime.version().feature() >= 21) {
return Stream.of("--enable-native-access=org.elasticsearch.nativeaccess,org.apache.lucene.core");
enableNativeAccessOptions.add("--enable-native-access=org.elasticsearch.nativeaccess,org.apache.lucene.core");
if (useEntitlements) {
enableNativeAccessOptions.add("--enable-native-access=ALL-UNNAMED");
if (Runtime.version().feature() >= 24) {
enableNativeAccessOptions.add("--illegal-native-access=deny");
}
}
}
return Stream.empty();
return enableNativeAccessOptions.stream();
}

/*
Expand Down
5 changes: 5 additions & 0 deletions docs/changelog/118122.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 118122
summary: "ES|QL: Partial result on demand for async queries"
area: ES|QL
type: enhancement
issues: []
12 changes: 12 additions & 0 deletions docs/changelog/119072.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
pr: 119072
summary: Turn `_source` meta fieldmapper's mode attribute into a no-op
area: Mapping
type: breaking
issues:
- 118596
breaking:
title: Turn `_source` meta fieldmapper's mode attribute into a no-op
area: Mapping
details: The `mode` mapping attribute of `_source` metadata field mapper has been turned into a no-op. Instead the `index.mapping.source.mode` index setting should be used to configure source mode.
impact: Configuring the `mode` attribute for the `_source` meta field mapper will have no effect on indices created with Elasticsearch 9.0.0 or later. Note that `_source.mode` configured on indices before upgrading to 9.0.0 or later will remain effective after upgrading.
notable: false
7 changes: 7 additions & 0 deletions docs/changelog/120256.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
pr: 120256
summary: Improve memory aspects of enrich cache
area: Ingest Node
type: enhancement
issues:
- 96050
- 120021
6 changes: 6 additions & 0 deletions docs/changelog/120645.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
pr: 120645
summary: "ES|QL: Support date nanos on date diff function"
area: ES|QL
type: enhancement
issues:
- 109999
5 changes: 5 additions & 0 deletions docs/changelog/120722.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 120722
summary: Migrate stream to core error parsing
area: Machine Learning
type: enhancement
issues: []
9 changes: 7 additions & 2 deletions docs/reference/esql/esql-across-clusters.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,7 @@ Which returns:
{
"is_running": false,
"took": 42, <1>
"is_partial": false, <7>
"columns" : [
{
"name" : "COUNT(http.response.status_code)",
Expand Down Expand Up @@ -275,8 +276,9 @@ Which returns:
<2> This section of counters shows all possible cluster search states and how many cluster
searches are currently in that state. The clusters can have one of the following statuses: *running*,
*successful* (searches on all shards were successful), *skipped* (the search
failed on a cluster marked with `skip_unavailable`=`true`) or *failed* (the search
failed on a cluster marked with `skip_unavailable`=`false`).
failed on a cluster marked with `skip_unavailable`=`true`), *failed* (the search
failed on a cluster marked with `skip_unavailable`=`false`) or *partial* (the search was
<<esql-async-query-stop-api, interrupted>> before finishing).
<3> The `_clusters/details` section shows metadata about the search on each cluster.
<4> If you included indices from the local cluster you sent the request to in your {ccs},
it is identified as "(local)".
Expand All @@ -285,6 +287,8 @@ which clusters have slower response times than others.
<6> The shard details for the search on that cluster, including a count of shards that were
skipped due to the can-match phase results. Shards are skipped when they cannot have any matching data
and therefore are not included in the full ES|QL query.
<7> The `is_partial` field is set to `true` if the search has partial results for any reason,
for example if it was interrupted before finishing using the <<esql-async-query-stop-api,async query stop API>>.


The cross-cluster metadata can be used to determine whether any data came back from a cluster.
Expand Down Expand Up @@ -314,6 +318,7 @@ Which returns:
{
"is_running": false,
"took": 55,
"is_partial": false,
"columns": [
... // not shown
],
Expand Down
3 changes: 3 additions & 0 deletions docs/reference/esql/esql-apis.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ overview of {esql} and related tutorials, see <<esql>>.
* <<esql-async-query-api>>
* <<esql-async-query-get-api>>
* <<esql-async-query-delete-api>>
* <<esql-async-query-stop-api>>


include::esql-query-api.asciidoc[]
Expand All @@ -26,3 +27,5 @@ include::esql-async-query-api.asciidoc[]
include::esql-async-query-get-api.asciidoc[]

include::esql-async-query-delete-api.asciidoc[]

include::esql-async-query-stop-api.asciidoc[]
7 changes: 7 additions & 0 deletions docs/reference/esql/esql-async-query-api.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -170,3 +170,10 @@ API>> to get the current status and available results for the query.
(Boolean)
If `true`, the query request is still executing.
--

`is_partial`::
+
--
(Boolean)
If `true`, the query has partial results - for example, as a result of using the <<esql-async-query-stop-api, async query stop API>>.
--
49 changes: 49 additions & 0 deletions docs/reference/esql/esql-async-query-stop-api.asciidoc
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
[[esql-async-query-stop-api]]
=== {esql} async query stop API
++++
<titleabbrev>{esql} async query stop API</titleabbrev>
++++

.New API reference
[sidebar]
--
For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs].
--

The <<esql,{esql}>> async query stop API is used to manually stop an async query. Once the stop command is issued,
the query stops processing new data and returns the results that have been already processed. Note that due to the pipelined
nature of {esql} queries, the stop operation is not immediate and may take time to return results.

The results are returned in <<esql-query-api-response-body,the same format>> as the
<<esql-async-query-get-api,{esql} async query get API>>.
If the query has been finished by the time the stop command is issued, the results are returned immediately.

If the query processing has not finished by the time the stop command is issued, the response will have the `is_partial`
field set to `true`.

[source,console]
----
POST /_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=/stop
----
// TEST[skip: no access to query ID]

[[esql-async-query-stop-api-request]]
==== {api-request-title}

`POST /_query/async/<query_id>/stop`

[[esql-async-query-stop-api-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, only the authenticated user that submitted the original query request
can stop the query.

[[esql-async-query-stop-api-path-params]]
==== {api-path-parms-title}

`<query_id>`::
(Required, string)
Identifier for the query to stop.
+
A query ID is provided in the <<esql-async-query-api,{esql} async query API>>'s
response for a query that does not complete in the awaited time.
1 change: 1 addition & 0 deletions docs/reference/esql/esql-rest.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,7 @@ Which returns:
----
{
"took": 28,
"is_partial": false,
"columns": [
{"name": "author", "type": "text"},
{"name": "name", "type": "text"},
Expand Down
Loading

0 comments on commit 5554bc2

Please sign in to comment.