From 3f723a451a57465e8a8d68538e1123d98fb3399c Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 25 Jan 2022 07:23:55 +0100 Subject: [PATCH 001/100] GeoPolygonDecomposer might fail due to numerical errors when calculating intersection with the dateline (#82953) This change makes sure that our intersection point always lies at either +180 or -180. --- .../common/geo/GeoPolygonDecomposer.java | 29 +++++++-------- .../common/geo/GeometryNormalizerTests.java | 35 ++++++++++++++++++- 2 files changed, 46 insertions(+), 18 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoPolygonDecomposer.java b/server/src/main/java/org/elasticsearch/common/geo/GeoPolygonDecomposer.java index c952bc47ce0d0..44c0b2297f55e 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoPolygonDecomposer.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoPolygonDecomposer.java @@ -160,18 +160,6 @@ private static void validateHole(LinearRing shell, LinearRing hole) { } } - private static Point position(Point p1, Point p2, double position) { - if (position == 0) { - return p1; - } else if (position == 1) { - return p2; - } else { - final double x = p1.getX() + position * (p2.getX() - p1.getX()); - final double y = p1.getY() + position * (p2.getY() - p1.getY()); - return new Point(x, y); - } - } - private static int createEdges( int component, boolean orientation, @@ -420,7 +408,7 @@ private static int intersections(double dateline, Edge[] edges) { double position = intersection(p1.getX(), p2.getX(), dateline); if (Double.isNaN(position) == false) { - edges[i].intersection(position); + edges[i].setIntersection(position, dateline); numIntersections++; maxComponent = Math.max(maxComponent, edges[i].component); } @@ -781,13 +769,20 @@ void setNext(Edge next) { } /** - * Set the intersection of this line segment to the given position + * Set the intersection of this line segment with the given dateline * * @param position position of the intersection [0..1] - * @return the {@link Point} of the intersection + * @param dateline of the intersection */ - Point intersection(double position) { - return intersect = position(coordinate, next.coordinate, position); + void setIntersection(double position, double dateline) { + if (position == 0) { + this.intersect = coordinate; + } else if (position == 1) { + this.intersect = next.coordinate; + } else { + final double y = coordinate.getY() + position * (next.coordinate.getY() - coordinate.getY()); + this.intersect = new Point(dateline, y); + } } @Override diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeometryNormalizerTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeometryNormalizerTests.java index 29d6fb50b59fe..e8080e92de6aa 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeometryNormalizerTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeometryNormalizerTests.java @@ -358,7 +358,7 @@ public void testPolygon() { polygon = new Polygon(new LinearRing(new double[] { 1, 0, 0, 1, 1 }, new double[] { 1, 1, 0, 0, 1 })); // for some reason, the normalizer always changes the order of the points indexed = new Polygon(new LinearRing(new double[] { 0, 0, 1, 1, 0 }, new double[] { 1, 0, 0, 1, 1 })); - ; + assertEquals(indexed, GeometryNormalizer.apply(Orientation.CCW, polygon)); assertEquals(false, GeometryNormalizer.needsNormalize(Orientation.CCW, polygon)); @@ -426,4 +426,37 @@ public void testMultiPolygon() { assertEquals(indexed, GeometryNormalizer.apply(Orientation.CCW, 
multiPolygon)); assertEquals(true, GeometryNormalizer.needsNormalize(Orientation.CCW, multiPolygon)); } + + public void testIssue82840() { + Polygon polygon = new Polygon( + new LinearRing( + new double[] { -143.10690080319134, -143.10690080319134, 62.41055750853541, -143.10690080319134 }, + new double[] { -90.0, -30.033129816260214, -30.033129816260214, -90.0 } + ) + ); + MultiPolygon indexedCCW = new MultiPolygon( + List.of( + new Polygon( + new LinearRing( + new double[] { 180.0, 180.0, 62.41055750853541, 180.0 }, + new double[] { -75.67887564489237, -30.033129816260214, -30.033129816260214, -75.67887564489237 } + ) + ), + new Polygon( + new LinearRing( + new double[] { -180.0, -180.0, -143.10690080319134, -143.10690080319134, -180.0 }, + new double[] { -30.033129816260214, -75.67887564489237, -90.0, -30.033129816260214, -30.033129816260214 } + ) + ) + ) + ); + assertEquals(indexedCCW, GeometryNormalizer.apply(Orientation.CCW, polygon)); + Polygon indexedCW = new Polygon( + new LinearRing( + new double[] { -143.10690080319134, 62.41055750853541, -143.10690080319134, -143.10690080319134 }, + new double[] { -30.033129816260214, -30.033129816260214, -90.0, -30.033129816260214 } + ) + ); + assertEquals(indexedCW, GeometryNormalizer.apply(Orientation.CW, polygon)); + } } From d9f77fa3a6f03d8b1360ae717fb380c0b8110c7e Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 25 Jan 2022 09:24:12 +0100 Subject: [PATCH 002/100] Allow doc-values only search on ip fields (#82929) Allows searching on ip fields when those fields are not indexed (index: false) but just doc values are enabled. This enables searches on archive data, which has access to doc values but not index structures. When combined with searchable snapshots, it allows downloading only data for a given (doc value) field to quickly filter down to a select set of documents. Relates #81210 and #52728 --- .../mapping/params/doc-values.asciidoc | 4 +- docs/reference/mapping/types/ip.asciidoc | 5 +- docs/reference/query-dsl.asciidoc | 4 +- .../test/field_caps/10_basic.yml | 15 ++ .../test/search/390_doc_values_search.yml | 32 +++ .../index/mapper/IpFieldMapper.java | 66 +++++-- .../index/mapper/IpFieldTypeTests.java | 183 +++++++++++++++--- 7 files changed, 263 insertions(+), 46 deletions(-) diff --git a/docs/reference/mapping/params/doc-values.asciidoc b/docs/reference/mapping/params/doc-values.asciidoc index 122d555036cea..1dc585b3975fd 100644 --- a/docs/reference/mapping/params/doc-values.asciidoc +++ b/docs/reference/mapping/params/doc-values.asciidoc @@ -17,8 +17,8 @@ makes this data access pattern possible. They store the same values as the sorting and aggregations. Doc values are supported on almost all field types, with the __notable exception of `text` and `annotated_text` fields__. -<>, <>, the <> -and the <> +<>, <>, the <>, +the <> and the <> can also be queried using term or range-based queries when they are not <> but only have doc values enabled. Query performance on doc values is much slower than on index structures, but diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index b81c63da98c10..2e598e40bbacc 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -57,7 +57,10 @@ The following parameters are accepted by `ip` fields: <>:: - Should the field be searchable? Accepts `true` (default) and `false`. + Should the field be quickly searchable? Accepts `true` (default) and + `false`. 
Fields that only have <> + enabled can still be queried using term or range-based queries, + albeit slower. <>:: diff --git a/docs/reference/query-dsl.asciidoc b/docs/reference/query-dsl.asciidoc index c0fa107ab1468..fa9334f390ed1 100644 --- a/docs/reference/query-dsl.asciidoc +++ b/docs/reference/query-dsl.asciidoc @@ -33,8 +33,8 @@ the stability of the cluster. Those queries can be categorised as follows: * Queries that need to do linear scans to identify matches: ** <> -** queries on <>, <>, <>, or <> fields that are not indexed - but have <> enabled +** queries on <>, <>, <>, <> or <> fields + that are not indexed but have <> enabled * Queries that have a high up-front cost: ** <> (except on diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml index 06067c6f4c62d..8b9cde1ad6bea 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml @@ -92,6 +92,9 @@ setup: non_indexed_boolean: type: boolean index: false + non_indexed_ip: + type: ip + index: false geo: type: keyword object: @@ -255,6 +258,18 @@ setup: - match: {fields.non_indexed_boolean.boolean.searchable: true} +--- +"Field caps for ip field with only doc values": + - skip: + version: " - 8.0.99" + reason: "doc values search was added in 8.1.0" + - do: + field_caps: + index: 'test1,test2,test3' + fields: non_indexed_ip + + - match: {fields.non_indexed_ip.ip.searchable: true} + --- "Get object and nested field caps": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/390_doc_values_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/390_doc_values_search.yml index f39f89c876485..323c521f4d128 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/390_doc_values_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/390_doc_values_search.yml @@ -42,6 +42,9 @@ setup: boolean: type: boolean index: false + ip: + type: ip + index: false - do: index: @@ -58,6 +61,7 @@ setup: date: "2017/01/01" keyword: "key1" boolean: "false" + ip: "192.168.0.1" - do: index: @@ -74,6 +78,7 @@ setup: date: "2017/01/02" keyword: "key2" boolean: "true" + ip: "192.168.0.2" - do: indices.refresh: {} @@ -284,3 +289,30 @@ setup: index: test body: { query: { range: { boolean: { gte: "false" } } } } - length: { hits.hits: 2 } + +--- +"Test match query on ip field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { match: { ip: { query: "192.168.0.1" } } } } + - length: { hits.hits: 1 } + +--- +"Test terms query on ip field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { terms: { ip: [ "192.168.0.1", "192.168.0.2" ] } } } + - length: { hits.hits: 2 } + +--- +"Test range query on ip field where only doc values are enabled": + + - do: + search: + index: test + body: { query: { range: { ip: { gte: "192.168.0.1" } } } } + - length: { hits.hits: 2 } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index cdd1c53b97e03..9efbbe27ec8dd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -14,6 +14,7 @@ import 
org.apache.lucene.document.StoredField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; @@ -204,7 +205,15 @@ public IpFieldType( } public IpFieldType(String name) { - this(name, true, false, true, null, null, Collections.emptyMap(), false); + this(name, true, true); + } + + public IpFieldType(String name, boolean isIndexed) { + this(name, isIndexed, true); + } + + public IpFieldType(String name, boolean isIndexed, boolean hasDocValues) { + this(name, isIndexed, false, hasDocValues, null, null, Collections.emptyMap(), false); } @Override @@ -212,6 +221,11 @@ public String typeName() { return CONTENT_TYPE; } + @Override + public boolean isSearchable() { + return isIndexed() || hasDocValues(); + } + @Override public boolean mayExistInIndex(SearchExecutionContext context) { return context.fieldExistsInIndex(name()); @@ -252,9 +266,10 @@ protected Object parseSourceValue(Object value) { @Override public Query termQuery(Object value, @Nullable SearchExecutionContext context) { - failIfNotIndexed(); + failIfNotIndexedNorDocValuesFallback(context); + Query query; if (value instanceof InetAddress) { - return InetAddressPoint.newExactQuery(name(), (InetAddress) value); + query = InetAddressPoint.newExactQuery(name(), (InetAddress) value); } else { if (value instanceof BytesRef) { value = ((BytesRef) value).utf8ToString(); @@ -262,15 +277,37 @@ public Query termQuery(Object value, @Nullable SearchExecutionContext context) { String term = value.toString(); if (term.contains("/")) { final Tuple cidr = InetAddresses.parseCidr(term); - return InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); + query = InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); + } else { + InetAddress address = InetAddresses.forString(term); + query = InetAddressPoint.newExactQuery(name(), address); } - InetAddress address = InetAddresses.forString(term); - return InetAddressPoint.newExactQuery(name(), address); + } + if (isIndexed()) { + return query; + } else { + return convertToDocValuesQuery(query); } } + static Query convertToDocValuesQuery(Query query) { + assert query instanceof PointRangeQuery; + PointRangeQuery pointRangeQuery = (PointRangeQuery) query; + return SortedSetDocValuesField.newSlowRangeQuery( + pointRangeQuery.getField(), + new BytesRef(pointRangeQuery.getLowerPoint()), + new BytesRef(pointRangeQuery.getUpperPoint()), + true, + true + ); + } + @Override public Query termsQuery(Collection values, SearchExecutionContext context) { + failIfNotIndexedNorDocValuesFallback(context); + if (isIndexed() == false) { + return super.termsQuery(values, context); + } InetAddress[] addresses = new InetAddress[values.size()]; int i = 0; for (Object value : values) { @@ -301,14 +338,15 @@ public Query rangeQuery( boolean includeUpper, SearchExecutionContext context ) { - failIfNotIndexed(); - return rangeQuery( - lowerTerm, - upperTerm, - includeLower, - includeUpper, - (lower, upper) -> InetAddressPoint.newRangeQuery(name(), lower, upper) - ); + failIfNotIndexedNorDocValuesFallback(context); + return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (lower, upper) -> { + Query query = InetAddressPoint.newRangeQuery(name(), lower, upper); + if (isIndexed()) { + return query; + } else { + return convertToDocValuesQuery(query); + } + }); } /** diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java index 8e7c678f95857..ec0348d23146f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java @@ -23,6 +23,8 @@ import java.util.Collections; import java.util.List; +import static org.elasticsearch.index.mapper.IpFieldMapper.IpFieldType.convertToDocValuesQuery; + public class IpFieldTypeTests extends FieldTypeTestCase { public void testValueFormat() throws Exception { @@ -51,31 +53,59 @@ public void testTermQuery() { MappedFieldType ft = new IpFieldMapper.IpFieldType("field"); String ip = "2001:db8::2:1"; - assertEquals(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)), ft.termQuery(ip, null)); + assertEquals(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)), ft.termQuery(ip, MOCK_CONTEXT)); ip = "192.168.1.7"; - assertEquals(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)), ft.termQuery(ip, null)); + assertEquals(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)), ft.termQuery(ip, MOCK_CONTEXT)); ip = "2001:db8::2:1"; String prefix = ip + "/64"; - assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64), ft.termQuery(prefix, null)); + assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64), ft.termQuery(prefix, MOCK_CONTEXT)); + + ip = "192.168.1.7"; + prefix = ip + "/16"; + assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16), ft.termQuery(prefix, MOCK_CONTEXT)); + + ft = new IpFieldMapper.IpFieldType("field", false); + + ip = "2001:db8::2:1"; + assertEquals( + convertToDocValuesQuery(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip))), + ft.termQuery(ip, MOCK_CONTEXT) + ); + + ip = "192.168.1.7"; + assertEquals( + convertToDocValuesQuery(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip))), + ft.termQuery(ip, MOCK_CONTEXT) + ); + + ip = "2001:db8::2:1"; + prefix = ip + "/64"; + assertEquals( + convertToDocValuesQuery(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64)), + ft.termQuery(prefix, MOCK_CONTEXT) + ); ip = "192.168.1.7"; prefix = ip + "/16"; - assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16), ft.termQuery(prefix, null)); + assertEquals( + convertToDocValuesQuery(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16)), + ft.termQuery(prefix, MOCK_CONTEXT) + ); MappedFieldType unsearchable = new IpFieldMapper.IpFieldType( "field", false, false, - true, + false, null, null, Collections.emptyMap(), false ); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("::1", null)); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("::1", MOCK_CONTEXT)); + assertEquals("Cannot search on field [field] since it is not indexed nor has doc values.", e.getMessage()); } public void testTermsQuery() { @@ -83,21 +113,21 @@ public void testTermsQuery() { assertEquals( InetAddressPoint.newSetQuery("field", InetAddresses.forString("::2"), InetAddresses.forString("::5")), - ft.termsQuery(Arrays.asList(InetAddresses.forString("::2"), InetAddresses.forString("::5")), null) + 
ft.termsQuery(Arrays.asList(InetAddresses.forString("::2"), InetAddresses.forString("::5")), MOCK_CONTEXT) ); assertEquals( InetAddressPoint.newSetQuery("field", InetAddresses.forString("::2"), InetAddresses.forString("::5")), - ft.termsQuery(Arrays.asList("::2", "::5"), null) + ft.termsQuery(Arrays.asList("::2", "::5"), MOCK_CONTEXT) ); // if the list includes a prefix query we fallback to a bool query assertEquals( new ConstantScoreQuery( - new BooleanQuery.Builder().add(ft.termQuery("::42", null), Occur.SHOULD) + new BooleanQuery.Builder().add(ft.termQuery("::42", MOCK_CONTEXT), Occur.SHOULD) .add(ft.termQuery("::2/16", null), Occur.SHOULD) .build() ), - ft.termsQuery(Arrays.asList("::42", "::2/16"), null) + ft.termsQuery(Arrays.asList("::42", "::2/16"), MOCK_CONTEXT) ); } @@ -106,47 +136,47 @@ public void testRangeQuery() { assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE), - ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null) + ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, MOCK_CONTEXT) ); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0")), - ft.rangeQuery(null, "192.168.2.0", randomBoolean(), true, null, null, null, null) + ft.rangeQuery(null, "192.168.2.0", randomBoolean(), true, null, null, null, MOCK_CONTEXT) ); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255")), - ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null, null, null, null) + ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null, null, null, MOCK_CONTEXT) ); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE), - ft.rangeQuery("2001:db8::", null, true, randomBoolean(), null, null, null, null) + ft.rangeQuery("2001:db8::", null, true, randomBoolean(), null, null, null, MOCK_CONTEXT) ); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE), - ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null, null, null, null) + ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null, null, null, MOCK_CONTEXT) ); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff")), - ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null, null, null, null) + ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null, null, null, MOCK_CONTEXT) ); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe")), - ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false, null, null, null, null) + ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false, null, null, null, MOCK_CONTEXT) ); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::")), // same lo/hi values but inclusive=false so this won't match anything - ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false, null, null, null, null) + ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false, null, null, null, MOCK_CONTEXT) ); // Upper bound is the min IP and is not inclusive - assertEquals(new MatchNoDocsQuery(), ft.rangeQuery("::", "::", true, false, null, null, null, null)); + assertEquals(new 
MatchNoDocsQuery(), ft.rangeQuery("::", "::", true, false, null, null, null, MOCK_CONTEXT)); // Lower bound is the max IP and is not inclusive assertEquals( @@ -159,33 +189,132 @@ public void testRangeQuery() { null, null, null, - null + MOCK_CONTEXT ) ); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff")), // same lo/hi values but inclusive=false so this won't match anything - ft.rangeQuery("::", "0.0.0.0", true, false, null, null, null, null) + ft.rangeQuery("::", "0.0.0.0", true, false, null, null, null, MOCK_CONTEXT) ); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE), // same lo/hi values but inclusive=false so this won't match anything - ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, null) + ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, MOCK_CONTEXT) ); assertEquals( // lower bound is ipv4, upper bound is ipv6 InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::")), - ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null, null, null, null) + ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null, null, null, MOCK_CONTEXT) + ); + + ft = new IpFieldMapper.IpFieldType("field", false); + + assertEquals( + convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE)), + ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, MOCK_CONTEXT) + ); + + assertEquals( + convertToDocValuesQuery( + InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0")) + ), + ft.rangeQuery(null, "192.168.2.0", randomBoolean(), true, null, null, null, MOCK_CONTEXT) + ); + + assertEquals( + convertToDocValuesQuery( + InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255")) + ), + ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null, null, null, MOCK_CONTEXT) + ); + + assertEquals( + convertToDocValuesQuery( + InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE) + ), + ft.rangeQuery("2001:db8::", null, true, randomBoolean(), null, null, null, MOCK_CONTEXT) + ); + + assertEquals( + convertToDocValuesQuery( + InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE) + ), + ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null, null, null, MOCK_CONTEXT) + ); + + assertEquals( + convertToDocValuesQuery( + InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff")) + ), + ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null, null, null, MOCK_CONTEXT) + ); + + assertEquals( + convertToDocValuesQuery( + InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe")) + ), + ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false, null, null, null, MOCK_CONTEXT) + ); + + assertEquals( + convertToDocValuesQuery( + InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::")) + ), + // same lo/hi values but inclusive=false so this won't match anything + ft.rangeQuery("2001:db8::1", 
"2001:db8::1", false, false, null, null, null, MOCK_CONTEXT) + ); + + // Upper bound is the min IP and is not inclusive + assertEquals(new MatchNoDocsQuery(), ft.rangeQuery("::", "::", true, false, null, null, null, MOCK_CONTEXT)); + + // Lower bound is the max IP and is not inclusive + assertEquals( + new MatchNoDocsQuery(), + ft.rangeQuery( + "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + false, + true, + null, + null, + null, + MOCK_CONTEXT + ) + ); + + assertEquals( + convertToDocValuesQuery( + InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff")) + ), + // same lo/hi values but inclusive=false so this won't match anything + ft.rangeQuery("::", "0.0.0.0", true, false, null, null, null, MOCK_CONTEXT) + ); + + assertEquals( + convertToDocValuesQuery( + InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE) + ), + // same lo/hi values but inclusive=false so this won't match anything + ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, MOCK_CONTEXT) + ); + + assertEquals( + // lower bound is ipv4, upper bound is ipv6 + convertToDocValuesQuery( + InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::")) + ), + ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null, null, null, MOCK_CONTEXT) ); MappedFieldType unsearchable = new IpFieldMapper.IpFieldType( "field", false, false, - true, + false, null, null, Collections.emptyMap(), @@ -193,9 +322,9 @@ public void testRangeQuery() { ); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> unsearchable.rangeQuery("::1", "2001::", true, true, null, null, null, null) + () -> unsearchable.rangeQuery("::1", "2001::", true, true, null, null, null, MOCK_CONTEXT) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is not indexed nor has doc values.", e.getMessage()); } public void testFetchSourceValue() throws IOException { From c8d98cf5320d65110a11988746ebed9789011bd9 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 25 Jan 2022 09:52:13 +0100 Subject: [PATCH 003/100] Allow for customised content-type validation (#80906) In order to support additional media types in request body a custom validation has to be supported. 
This commit moves validation from RestController to the RestHandler interface (as a default method) and allows new RestHandler implementations to provide their own implementations. Closes #80482 --- docs/changelog/80906.yaml | 6 +++++ .../elasticsearch/rest/BaseRestHandler.java | 10 +++++++ .../elasticsearch/rest/RestController.java | 8 +++--- .../org/elasticsearch/rest/RestHandler.java | 7 ++--- .../rest/RestControllerTests.java | 26 +++++++++++++++++++ .../security/rest/SecurityRestFilter.java | 6 ++--- .../xpack/sql/plugin/RestSqlQueryAction.java | 6 ----- 7 files changed, 50 insertions(+), 19 deletions(-) create mode 100644 docs/changelog/80906.yaml diff --git a/docs/changelog/80906.yaml b/docs/changelog/80906.yaml new file mode 100644 index 0000000000000..a1d7cbbd6fe7a --- /dev/null +++ b/docs/changelog/80906.yaml @@ -0,0 +1,6 @@ +pr: 80906 +summary: Allow for customised content-type validation +area: Infra/REST API +type: enhancement +issues: + - 80482 diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 3abfeec8fa825..a6f64509c278e 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -103,6 +103,11 @@ public final void handleRequest(RestRequest request, RestChannel channel, NodeCl action.accept(channel); } + @Override + public boolean mediaTypesValid(RestRequest request) { + return request.getXContentType() != null; + } + protected final String unrecognized( final RestRequest request, final Set invalids, @@ -241,5 +246,10 @@ public boolean supportsContentStream() { public boolean allowsUnsafeBuffers() { return delegate.allowsUnsafeBuffers(); } + + @Override + public boolean mediaTypesValid(RestRequest request) { + return delegate.mediaTypesValid(request); + } } } diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 34918d6240622..5500199246263 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -334,15 +334,15 @@ private void dispatchRequest(RestRequest request, RestChannel channel, RestHandl throws Exception { final int contentLength = request.contentLength(); if (contentLength > 0) { - final XContentType xContentType = request.getXContentType(); - if (xContentType == null) { + if (handler.mediaTypesValid(request) == false) { sendContentTypeErrorMessage(request.getAllHeaderValues("Content-Type"), channel); return; } + final XContentType xContentType = request.getXContentType(); // TODO consider refactoring to handler.supportsContentStream(xContentType). 
It is only used with JSON and SMILE if (handler.supportsContentStream() - && xContentType.canonical() != XContentType.JSON - && xContentType.canonical() != XContentType.SMILE) { + && XContentType.JSON != xContentType.canonical() + && XContentType.SMILE != xContentType.canonical()) { channel.sendResponse( BytesRestResponse.createSimpleErrorResponse( channel, diff --git a/server/src/main/java/org/elasticsearch/rest/RestHandler.java b/server/src/main/java/org/elasticsearch/rest/RestHandler.java index baba20eb33673..08983ffbe8914 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/RestHandler.java @@ -13,10 +13,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.rest.RestRequest.Method; -import org.elasticsearch.xcontent.MediaType; -import org.elasticsearch.xcontent.MediaTypeRegistry; import org.elasticsearch.xcontent.XContent; -import org.elasticsearch.xcontent.XContentType; import java.util.Collections; import java.util.List; @@ -76,8 +73,8 @@ default boolean allowSystemIndexAccessByDefault() { return false; } - default MediaTypeRegistry validAcceptMediaTypes() { - return XContentType.MEDIA_TYPE_REGISTRY; + default boolean mediaTypesValid(RestRequest request) { + return request.getXContentType() != null; } class Route { diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 4fab2b6c5ca5d..0b24bcaeb6958 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -774,6 +774,32 @@ public void testCurrentVersionVNDMediaTypeIsNotUsingCompatibility() { assertTrue(channel.getSendResponseCalled()); } + public void testCustomMediaTypeValidation() { + RestController restController = new RestController(Collections.emptySet(), null, client, circuitBreakerService, usageService); + + final String mediaType = "application/x-protobuf"; + FakeRestRequest fakeRestRequest = requestWithContent(mediaType); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); + + // register handler that handles custom media type validation + restController.registerHandler(new Route(GET, "/foo"), new RestHandler() { + @Override + public boolean mediaTypesValid(RestRequest request) { + return request.getXContentType() == null + && request.getParsedContentType().mediaTypeWithoutParameters().equals("application/x-protobuf"); + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + channel.sendResponse(new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)); + } + }); + + assertFalse(channel.getSendResponseCalled()); + restController.dispatchRequest(fakeRestRequest, channel, new ThreadContext(Settings.EMPTY)); + assertTrue(channel.getSendResponseCalled()); + } + private static final class TestHttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport { TestHttpServerTransport() {} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java index 06eb1acab720c..95b9f9b78560a 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java @@ -24,8 +24,6 @@ import org.elasticsearch.rest.RestRequest.Method; import org.elasticsearch.rest.RestRequestFilter; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xcontent.MediaType; -import org.elasticsearch.xcontent.MediaTypeRegistry; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authc.support.SecondaryAuthenticator; @@ -185,7 +183,7 @@ private RestRequest maybeWrapRestRequest(RestRequest restRequest) throws IOExcep } @Override - public MediaTypeRegistry validAcceptMediaTypes() { - return restHandler.validAcceptMediaTypes(); + public boolean mediaTypesValid(RestRequest request) { + return restHandler.mediaTypesValid(request); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java index 128a311c537fc..1a92cdb704b18 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java @@ -12,8 +12,6 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.xcontent.MediaType; -import org.elasticsearch.xcontent.MediaTypeRegistry; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.sql.action.Protocol; import org.elasticsearch.xpack.sql.action.SqlQueryAction; @@ -42,10 +40,6 @@ public List routes() { ); } - public MediaTypeRegistry validAcceptMediaTypes() { - return SqlMediaTypeParser.MEDIA_TYPE_REGISTRY; - } - @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { SqlQueryRequest sqlRequest; From 6687a28e4e800c97df9e7580cf8fea0ac1d51f0a Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 25 Jan 2022 11:00:37 +0200 Subject: [PATCH 004/100] Display security auto-configuration with fancy unicode (#82740) This PR slightly improves the format of the security auto-configuration information that is printed on the terminal when the initial cluster node first starts up. It uses eye-catching Unicode characters for bullet points. It also uses Unicode to display a continuous border, for the whole width of the terminal, before and after the information. In addition, it uses ANSI escape sequences to render some of the information in bold fonts. It will fall back to using regular characters if the JVM is set up with a non-UTF encoding for standard out. 
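
As a standalone illustration of that fallback — a minimal sketch, not the patch's code: the charset check and the ANSI check are two independent switches, and the detection stand-ins used here (Charset.defaultCharset() and System.console()) are assumptions for illustration only; the patch itself derives both from the JANSI-based console loader shown in the diff below.

import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Set;

public class ConsoleStyleSketch {

    // UTF variants assumed able to encode the bullet and border code points below
    private static final Set<Charset> UTF_CHARSETS = Set.of(
        StandardCharsets.UTF_8,
        StandardCharsets.UTF_16,
        StandardCharsets.UTF_16LE,
        StandardCharsets.UTF_16BE
    );

    public static void main(String[] args) {
        // Stand-ins for the real console detection (an assumption, see lead-in above)
        Charset consoleCharset = Charset.defaultCharset();
        boolean ansiEnabled = System.console() != null;

        // Two independent switches: Unicode pictograms vs plain ASCII, ANSI bold vs no styling
        boolean useUnicode = UTF_CHARSETS.contains(consoleCharset);
        String bullet = useUnicode ? "\u2022" : "*";
        String border = (useUnicode ? "\u2501" : "-").repeat(80);
        String boldOn = ansiEnabled ? "\u001B[1m" : "";
        String boldOff = ansiEnabled ? "\u001B[22m" : "";

        System.out.println(border);
        System.out.println(bullet + " Password for the " + boldOn + "elastic" + boldOff + " user: example-password");
        System.out.println(border);
    }
}

Running the sketch on a typical JDK with -Dfile.encoding=ISO-8859-1 degrades the output to plain ASCII with no escape sequences, which mirrors the fallback behaviour described in the commit message.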
--- distribution/tools/ansi-console/build.gradle | 6 + .../io/ansi/AnsiConsoleLoader.java | 51 +++- .../bootstrap/ConsoleLoaderTests.java | 3 +- .../io/ansi/AnsiConsoleLoaderTests.java | 51 +++- .../elasticsearch/bootstrap/Bootstrap.java | 4 +- .../bootstrap/BootstrapInfo.java | 11 +- .../bootstrap/ConsoleLoader.java | 18 +- .../InitialNodeSecurityAutoConfiguration.java | 218 ++++++++++++++---- 8 files changed, 284 insertions(+), 78 deletions(-) diff --git a/distribution/tools/ansi-console/build.gradle b/distribution/tools/ansi-console/build.gradle index f0bd8a79d26f2..f386add3f238f 100644 --- a/distribution/tools/ansi-console/build.gradle +++ b/distribution/tools/ansi-console/build.gradle @@ -18,3 +18,9 @@ dependencies { api "org.fusesource.jansi:jansi:2.3.4" } +// the code and tests in this project cover console initialization +// which happens before the SecurityManager is installed +tasks.named("test").configure { + systemProperty 'tests.security.manager', 'false' +} + diff --git a/distribution/tools/ansi-console/src/main/java/org/elasticsearch/io/ansi/AnsiConsoleLoader.java b/distribution/tools/ansi-console/src/main/java/org/elasticsearch/io/ansi/AnsiConsoleLoader.java index 4356ce57e1ebc..81914e344e609 100644 --- a/distribution/tools/ansi-console/src/main/java/org/elasticsearch/io/ansi/AnsiConsoleLoader.java +++ b/distribution/tools/ansi-console/src/main/java/org/elasticsearch/io/ansi/AnsiConsoleLoader.java @@ -7,33 +7,72 @@ */ package org.elasticsearch.io.ansi; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.bootstrap.ConsoleLoader; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.SuppressForbidden; +import org.fusesource.jansi.Ansi; import org.fusesource.jansi.AnsiConsole; import org.fusesource.jansi.AnsiPrintStream; import org.fusesource.jansi.AnsiType; +import org.fusesource.jansi.io.AnsiOutputStream; -import java.io.PrintStream; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.nio.charset.Charset; import java.util.function.Supplier; +import static org.apache.logging.log4j.LogManager.getLogger; + /** - * Loads the({@link PrintStream} print stream) from {@link AnsiConsole} and checks whether it meets our requirements for a "Console". + * Loads the {@link AnsiConsole} and checks whether it meets our requirements for a "Console". * @see org.elasticsearch.bootstrap.ConsoleLoader */ -public class AnsiConsoleLoader implements Supplier { +public class AnsiConsoleLoader implements Supplier { + + private static final Logger logger = getLogger(AnsiConsoleLoader.class); - public PrintStream get() { + public ConsoleLoader.Console get() { final AnsiPrintStream out = AnsiConsole.out(); + return newConsole(out); + } + + // package-private for tests + static @Nullable ConsoleLoader.Console newConsole(AnsiPrintStream out) { if (isValidConsole(out)) { - return out; + return new ConsoleLoader.Console(out, () -> out.getTerminalWidth(), Ansi.isEnabled(), tryExtractPrintCharset(out)); } else { return null; } } - static boolean isValidConsole(AnsiPrintStream out) { + private static boolean isValidConsole(AnsiPrintStream out) { return out != null // cannot load stdout && out.getType() != AnsiType.Redirected // output is a pipe (etc) && out.getType() != AnsiType.Unsupported // could not determine terminal type && out.getTerminalWidth() > 0 // docker, non-terminal logs ; } + + /** + * Uses reflection on the JANSI lib in order to expose the {@code Charset} used to encode the console's print stream. 
+ * The {@code Charset} is not otherwise exposed by the library, and this avoids replicating the charset selection logic in our code. + */ + @SuppressForbidden(reason = "Best effort exposing print stream's charset with reflection") + @Nullable + private static Charset tryExtractPrintCharset(AnsiPrintStream ansiPrintStream) { + try { + Method getOutMethod = ansiPrintStream.getClass().getDeclaredMethod("getOut"); + getOutMethod.setAccessible(true); + AnsiOutputStream ansiOutputStream = (AnsiOutputStream) getOutMethod.invoke(ansiPrintStream); + Field charsetField = ansiOutputStream.getClass().getDeclaredField("cs"); + charsetField.setAccessible(true); + return (Charset) charsetField.get(ansiOutputStream); + } catch (Throwable t) { + // has the library been upgraded and it now doesn't expose the same fields with the same names? + // is the Security Manager installed, preventing the access? + logger.info("Failed to detect JANSI's print stream encoding", t); + return null; + } + } } diff --git a/distribution/tools/ansi-console/src/test/java/org/elasticsearch/bootstrap/ConsoleLoaderTests.java b/distribution/tools/ansi-console/src/test/java/org/elasticsearch/bootstrap/ConsoleLoaderTests.java index e0bc5b04ec4e4..f34ddae8a2ffb 100644 --- a/distribution/tools/ansi-console/src/test/java/org/elasticsearch/bootstrap/ConsoleLoaderTests.java +++ b/distribution/tools/ansi-console/src/test/java/org/elasticsearch/bootstrap/ConsoleLoaderTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.io.ansi.AnsiConsoleLoader; import org.elasticsearch.test.ESTestCase; -import java.io.PrintStream; import java.util.function.Supplier; import static org.hamcrest.Matchers.instanceOf; @@ -20,7 +19,7 @@ public class ConsoleLoaderTests extends ESTestCase { public void testBuildSupplier() { - final Supplier supplier = ConsoleLoader.buildConsoleLoader(AnsiConsoleLoader.class.getClassLoader()); + final Supplier supplier = ConsoleLoader.buildConsoleLoader(AnsiConsoleLoader.class.getClassLoader()); assertThat(supplier, notNullValue()); assertThat(supplier, instanceOf(AnsiConsoleLoader.class)); } diff --git a/distribution/tools/ansi-console/src/test/java/org/elasticsearch/io/ansi/AnsiConsoleLoaderTests.java b/distribution/tools/ansi-console/src/test/java/org/elasticsearch/io/ansi/AnsiConsoleLoaderTests.java index 4e2da7dd54480..4b40f8fa568de 100644 --- a/distribution/tools/ansi-console/src/test/java/org/elasticsearch/io/ansi/AnsiConsoleLoaderTests.java +++ b/distribution/tools/ansi-console/src/test/java/org/elasticsearch/io/ansi/AnsiConsoleLoaderTests.java @@ -8,7 +8,9 @@ package org.elasticsearch.io.ansi; +import org.elasticsearch.bootstrap.ConsoleLoader; import org.elasticsearch.test.ESTestCase; +import org.fusesource.jansi.Ansi; import org.fusesource.jansi.AnsiColors; import org.fusesource.jansi.AnsiMode; import org.fusesource.jansi.AnsiPrintStream; @@ -17,9 +19,12 @@ import org.fusesource.jansi.io.AnsiProcessor; import java.io.ByteArrayOutputStream; +import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class AnsiConsoleLoaderTests extends ESTestCase { @@ -30,35 +35,68 @@ public class AnsiConsoleLoaderTests extends ESTestCase { private static final AnsiOutputStream.IoRunnable NO_OP_RUNNABLE = () -> {}; + private static final Charset[] charsets = new Charset[] { + StandardCharsets.US_ASCII, + StandardCharsets.ISO_8859_1, + StandardCharsets.UTF_8, + 
StandardCharsets.UTF_16, + StandardCharsets.UTF_16LE, + StandardCharsets.UTF_16BE }; + public void testNullOutputIsNotConsole() { - assertThat(AnsiConsoleLoader.isValidConsole(null), is(false)); + assertThat(AnsiConsoleLoader.newConsole(null), nullValue()); } public void testRedirectedOutputIsNotConsole() { try (AnsiPrintStream ansiPrintStream = buildStream(AnsiType.Redirected, randomIntBetween(80, 120))) { - assertThat(AnsiConsoleLoader.isValidConsole(ansiPrintStream), is(false)); + assertThat(AnsiConsoleLoader.newConsole(ansiPrintStream), nullValue()); } } public void testUnsupportedTerminalIsNotConsole() { try (AnsiPrintStream ansiPrintStream = buildStream(AnsiType.Unsupported, randomIntBetween(80, 120))) { - assertThat(AnsiConsoleLoader.isValidConsole(ansiPrintStream), is(false)); + assertThat(AnsiConsoleLoader.newConsole(ansiPrintStream), nullValue()); } } public void testZeroWidthTerminalIsNotConsole() { try (AnsiPrintStream ansiPrintStream = buildStream(randomFrom(SUPPORTED_TERMINAL_TYPES), 0)) { - assertThat(AnsiConsoleLoader.isValidConsole(ansiPrintStream), is(false)); + assertThat(AnsiConsoleLoader.newConsole(ansiPrintStream), nullValue()); } } public void testStandardTerminalIsConsole() { + int width = randomIntBetween(40, 260); + try (AnsiPrintStream ansiPrintStream = buildStream(randomFrom(SUPPORTED_TERMINAL_TYPES), width)) { + ConsoleLoader.Console console = AnsiConsoleLoader.newConsole(ansiPrintStream); + assertThat(console, notNullValue()); + assertThat(console.width().get(), is(width)); + } + } + + public void testConsoleCharset() { + Charset charset = randomFrom(charsets); + try (AnsiPrintStream ansiPrintStream = buildStream(randomFrom(SUPPORTED_TERMINAL_TYPES), randomIntBetween(40, 260), charset)) { + ConsoleLoader.Console console = AnsiConsoleLoader.newConsole(ansiPrintStream); + assertThat(console, notNullValue()); + assertThat(console.charset(), is(charset)); + } + } + + public void testDisableANSI() { + Ansi.setEnabled(false); try (AnsiPrintStream ansiPrintStream = buildStream(randomFrom(SUPPORTED_TERMINAL_TYPES), randomIntBetween(40, 260))) { - assertThat(AnsiConsoleLoader.isValidConsole(ansiPrintStream), is(true)); + ConsoleLoader.Console console = AnsiConsoleLoader.newConsole(ansiPrintStream); + assertThat(console, notNullValue()); + assertThat(console.ansiEnabled(), is(false)); } } private AnsiPrintStream buildStream(AnsiType type, int width) { + return buildStream(type, width, randomFrom(charsets)); + } + + private AnsiPrintStream buildStream(AnsiType type, int width, Charset cs) { final ByteArrayOutputStream baos = new ByteArrayOutputStream(); final AnsiOutputStream ansiOutputStream = new AnsiOutputStream( baos, @@ -67,12 +105,11 @@ private AnsiPrintStream buildStream(AnsiType type, int width) { new AnsiProcessor(baos), type, randomFrom(AnsiColors.values()), - randomFrom(StandardCharsets.UTF_8, StandardCharsets.US_ASCII, StandardCharsets.UTF_16, StandardCharsets.ISO_8859_1), + cs, NO_OP_RUNNABLE, NO_OP_RUNNABLE, randomBoolean() ); return new AnsiPrintStream(ansiOutputStream, randomBoolean()); } - } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index c57f6e1f86c21..ba94dcb965f41 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -301,7 +301,7 @@ static void init(final boolean foreground, final Path pidFile, final boolean qui final SecureSettings keystore = 
BootstrapUtil.loadSecureSettings(initialEnv); final Environment environment = createEnvironment(pidFile, keystore, initialEnv.settings(), initialEnv.configFile()); - BootstrapInfo.setConsoleOutput(getConsole(environment)); + BootstrapInfo.setConsole(getConsole(environment)); // the LogConfigurator will replace System.out and System.err with redirects to our logfile, so we need to capture // the stream objects before calling LogConfigurator to be able to close them when appropriate @@ -417,7 +417,7 @@ static void init(final boolean foreground, final Path pidFile, final boolean qui } } - private static PrintStream getConsole(Environment environment) { + private static ConsoleLoader.Console getConsole(Environment environment) { return ConsoleLoader.loadConsole(environment); } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java index 295cc00fa67e0..0b63211f2851a 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java @@ -11,7 +11,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.core.SuppressForbidden; -import java.io.PrintStream; import java.util.Dictionary; import java.util.Enumeration; @@ -21,7 +20,7 @@ @SuppressForbidden(reason = "exposes read-only view of system properties") public final class BootstrapInfo { - private static final SetOnce consoleOutput = new SetOnce<>(); + private static final SetOnce console = new SetOnce<>(); /** no instantiation */ private BootstrapInfo() {} @@ -53,8 +52,8 @@ public static boolean isSystemCallFilterInstalled() { /** * Returns a reference to a stream attached to Standard Output, iff we have determined that stdout is a console (tty) */ - public static PrintStream getConsoleOutput() { - return consoleOutput.get(); + public static ConsoleLoader.Console getConsole() { + return console.get(); } /** @@ -123,8 +122,8 @@ public static Dictionary getSystemProperties() { public static void init() {} - static void setConsoleOutput(PrintStream output) { - consoleOutput.set(output); + static void setConsole(ConsoleLoader.Console console) { + BootstrapInfo.console.set(console); } } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ConsoleLoader.java b/server/src/main/java/org/elasticsearch/bootstrap/ConsoleLoader.java index 703739ceff355..8b0d914e2da3d 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ConsoleLoader.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ConsoleLoader.java @@ -8,6 +8,7 @@ package org.elasticsearch.bootstrap; +import org.elasticsearch.core.Nullable; import org.elasticsearch.env.Environment; import java.io.IOException; @@ -16,6 +17,7 @@ import java.net.MalformedURLException; import java.net.URL; import java.net.URLClassLoader; +import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Path; import java.util.function.Supplier; @@ -28,20 +30,20 @@ public class ConsoleLoader { private static final String CONSOLE_LOADER_CLASS = "org.elasticsearch.io.ansi.AnsiConsoleLoader"; - public static PrintStream loadConsole(Environment env) { + public static Console loadConsole(Environment env) { final ClassLoader classLoader = buildClassLoader(env); - final Supplier supplier = buildConsoleLoader(classLoader); + final Supplier supplier = buildConsoleLoader(classLoader); return supplier.get(); } + public record Console(PrintStream printStream, Supplier width, Boolean ansiEnabled, 
@Nullable Charset charset) {} + @SuppressWarnings("unchecked") - static Supplier buildConsoleLoader(ClassLoader classLoader) { + static Supplier buildConsoleLoader(ClassLoader classLoader) { try { - final Class> cls = (Class>) classLoader.loadClass( - CONSOLE_LOADER_CLASS - ); - final Constructor> constructor = cls.getConstructor(); - final Supplier supplier = constructor.newInstance(); + final Class> cls = (Class>) classLoader.loadClass(CONSOLE_LOADER_CLASS); + final Constructor> constructor = cls.getConstructor(); + final Supplier supplier = constructor.newInstance(); return supplier; } catch (ReflectiveOperationException e) { throw new RuntimeException("Failed to load ANSI console", e); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java index 5a2284ed21830..cdc93a9c48a6d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java @@ -9,10 +9,12 @@ import org.apache.log4j.LogManager; import org.apache.log4j.Logger; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.bootstrap.BootstrapInfo; +import org.elasticsearch.bootstrap.ConsoleLoader; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -21,11 +23,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; -import org.elasticsearch.xpack.security.enrollment.BaseEnrollmentTokenGenerator; import org.elasticsearch.xpack.security.enrollment.InternalEnrollmentTokenGenerator; import org.elasticsearch.xpack.security.support.SecurityIndexManager; -import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -73,8 +74,8 @@ public static void maybeGenerateEnrollmentTokensAndElasticCredentialsOnNodeStart client ); - final PrintStream out = getConsoleOutput(); - if (out == null) { + final ConsoleLoader.Console console = getConsole(); + if (console == null) { LOGGER.info( "Auto-configuration will not generate a password for the elastic built-in superuser, as we cannot " + " determine if there is a terminal attached to the elasticsearch process. 
You can use the" @@ -132,7 +133,7 @@ protected void doRun() { kibanaEnrollmentToken, nodeEnrollmentToken, httpsCaFingerprint, - out + console ); }, e -> LOGGER.error("Unexpected exception during security auto-configuration", e)), 3 @@ -191,17 +192,17 @@ protected void doRun() { }); } - private static PrintStream getConsoleOutput() { - final PrintStream output = BootstrapInfo.getConsoleOutput(); - if (output == null) { + private static ConsoleLoader.Console getConsole() { + final ConsoleLoader.Console console = BootstrapInfo.getConsole(); + if (console == null) { return null; } // Check if it has been closed, try to write something so that we trigger PrintStream#ensureOpen - output.println(); - if (output.checkError()) { + console.printStream().println(); + if (console.printStream().checkError()) { return null; } - return output; + return console; } private static void outputInformationToConsole( @@ -209,73 +210,196 @@ private static void outputInformationToConsole( String kibanaEnrollmentToken, String nodeEnrollmentToken, String caCertFingerprint, - PrintStream out + ConsoleLoader.Console console ) { + // Use eye-catching pictograms to output the configuration information, but only if the + // console charset utilizes some known variation of UTF, otherwise we risk that the encoder + // cannot handle the special unicode code points and will display funky question marks instead + boolean useUnicode = StandardCharsets.UTF_8.equals(console.charset()) + || StandardCharsets.UTF_16.equals(console.charset()) + || StandardCharsets.UTF_16LE.equals(console.charset()) + || StandardCharsets.UTF_16BE.equals(console.charset()); + final String infoBullet = useUnicode ? "\u2139\uFE0F" : "->"; + final String bullet = useUnicode ? "\u2022" : "*"; + final String hyphenBullet = useUnicode ? "\u2043" : "-"; + final String errorBullet = useUnicode ? "\u274C" : "X"; + final String successBullet = useUnicode ? "\u2705" : "->"; + final String horizontalBorderLine = useUnicode ? "\u2501" : "-"; + final String boldOnANSI = console.ansiEnabled() ? "\u001B[1m" : ""; + final String boldOffANSI = console.ansiEnabled() ? 
"\u001B[22m" : ""; + final String cmdOn = "`"; + final String cmdOff = "`"; + final int horizontalBorderLength = console.width().get(); StringBuilder builder = new StringBuilder(); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); - builder.append("--------------------------------------------------------------------------------------------------------------"); + builder.append(System.lineSeparator()); + builder.append(System.lineSeparator()); + builder.append(horizontalBorderLine.repeat(horizontalBorderLength)); + builder.append(System.lineSeparator()); + builder.append(successBullet + " Elasticsearch security features have been automatically configured!"); + builder.append(System.lineSeparator()); + builder.append(successBullet + " Authentication is enabled and cluster connections are encrypted."); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); if (elasticPassword == null) { - builder.append("Unable to auto-generate the password for the elastic built-in superuser."); - } else if (Strings.isEmpty(elasticPassword)) { - builder.append("The generated password for the elastic built-in superuser has not been changed."); - } else { - builder.append("The generated password for the elastic built-in superuser is:"); + builder.append( + errorBullet + + " Unable to auto-generate the password for the " + + boldOnANSI + + "elastic" + + boldOffANSI + + " built-in superuser." + ); + } else if (false == Strings.isEmpty(elasticPassword)) { + builder.append( + infoBullet + + " Password for the " + + boldOnANSI + + "elastic" + + boldOffANSI + + " user (reset with " + + cmdOn + + "bin/elasticsearch-reset-password -u elastic" + + cmdOff + + "):" + ); builder.append(System.lineSeparator()); - builder.append(elasticPassword); + builder.append(" " + boldOnANSI + elasticPassword + boldOffANSI); + } + builder.append(System.lineSeparator()); + builder.append(System.lineSeparator()); + if (null != caCertFingerprint) { + builder.append(infoBullet + " HTTP CA certificate SHA-256 fingerprint:"); + builder.append(System.lineSeparator()); + builder.append(" " + boldOnANSI + caCertFingerprint + boldOffANSI); } builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); + if (null != kibanaEnrollmentToken) { - builder.append("The enrollment token for Kibana instances, valid for the next "); - builder.append(BaseEnrollmentTokenGenerator.ENROLL_API_KEY_EXPIRATION_MINUTES); - builder.append(" minutes:"); + builder.append(infoBullet + " Configure Kibana to use this cluster:"); + builder.append(System.lineSeparator()); + builder.append(bullet + " Run Kibana and click the configuration link in the terminal when Kibana starts."); builder.append(System.lineSeparator()); - builder.append(kibanaEnrollmentToken); + builder.append(bullet + " Copy the following enrollment token and paste it into Kibana in your browser "); + builder.append("(valid for the next 30 minutes):"); + builder.append(System.lineSeparator()); + builder.append(" " + boldOnANSI + kibanaEnrollmentToken + boldOffANSI); } else { - builder.append("Unable to generate an enrollment token for Kibana instances."); + builder.append(errorBullet + " Unable to generate an enrollment token for Kibana instances, "); + builder.append("try invoking " + cmdOn + "bin/elasticsearch-create-enrollment-token -s kibana" + cmdOff + "."); } builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); - if (nodeEnrollmentToken == null) { - builder.append("Unable to generate an enrollment 
token for Elasticsearch nodes."); + + // Node enrollment token + if (null == nodeEnrollmentToken) { + builder.append(errorBullet + " An enrollment token to enroll new nodes wasn't generated."); + builder.append(" To add nodes and enroll them into this cluster:"); builder.append(System.lineSeparator()); + builder.append(bullet + " On this node:"); builder.append(System.lineSeparator()); - } else if (false == Strings.isEmpty(nodeEnrollmentToken)) { - builder.append("The enrollment token for Elasticsearch instances, valid for the next "); - builder.append(BaseEnrollmentTokenGenerator.ENROLL_API_KEY_EXPIRATION_MINUTES); - builder.append(" minutes:"); + builder.append( + " " + + hyphenBullet + + " Create an enrollment token with " + + cmdOn + + "bin/elasticsearch-create-enrollment-token -s node" + + cmdOff + + "." + ); builder.append(System.lineSeparator()); - builder.append(nodeEnrollmentToken); + builder.append(" " + hyphenBullet + " Restart Elasticsearch."); builder.append(System.lineSeparator()); + builder.append(bullet + " On other nodes:"); builder.append(System.lineSeparator()); - } - if (null != caCertFingerprint) { - builder.append("The hex-encoded SHA-256 fingerprint of the generated HTTPS CA DER-encoded certificate:"); + builder.append( + " " + + hyphenBullet + + " Start Elasticsearch with " + + cmdOn + + "bin/elasticsearch --enrollment-token " + + cmdOff + + ", using the enrollment token that you generated." + ); + } else if (Strings.isEmpty(nodeEnrollmentToken)) { + builder.append(infoBullet + " Configure other nodes to join this cluster:"); builder.append(System.lineSeparator()); - builder.append(caCertFingerprint); + builder.append(bullet + " On this node:"); + builder.append(System.lineSeparator()); + builder.append( + " " + + hyphenBullet + + " Create an enrollment token with " + + cmdOn + + "bin/elasticsearch-create-enrollment-token -s node" + + cmdOff + + "." + ); builder.append(System.lineSeparator()); + builder.append( + " " + + hyphenBullet + + " Uncomment the " + + boldOnANSI + + "transport.host" + + boldOffANSI + + " setting at the end of " + + boldOnANSI + + "config/elasticsearch.yml" + + boldOffANSI + + "." + ); + builder.append(System.lineSeparator()); + builder.append(" " + hyphenBullet + " Restart Elasticsearch."); + builder.append(System.lineSeparator()); + builder.append(bullet + " On other nodes:"); + builder.append(System.lineSeparator()); + builder.append( + " " + + hyphenBullet + + " Start Elasticsearch with " + + cmdOn + + "bin/elasticsearch --enrollment-token " + + cmdOff + + ", using the enrollment token that you generated." 
+ ); + } else { + builder.append(infoBullet + " Configure other nodes to join this cluster:"); + builder.append(System.lineSeparator()); + builder.append( + bullet + + " Copy the following enrollment token and start new Elasticsearch nodes with " + + cmdOn + + "bin/elasticsearch --enrollment-token " + + cmdOff + + " (valid for the next 30 minutes):" + ); + builder.append(System.lineSeparator()); + builder.append(" " + boldOnANSI + nodeEnrollmentToken + boldOffANSI); + builder.append(System.lineSeparator()); + builder.append(System.lineSeparator()); + builder.append(" If you're running in Docker, copy the enrollment token and run:"); + builder.append(System.lineSeparator()); + builder.append( + " " + + cmdOn + + "docker run -e \"ENROLLMENT_TOKEN=\" docker.elastic.co/elasticsearch/elasticsearch:" + + Version.CURRENT + + cmdOff + ); } + builder.append(System.lineSeparator()); - builder.append(System.lineSeparator()); - builder.append("You can complete the following actions at any time:"); - builder.append(System.lineSeparator()); - builder.append("Reset the password of the elastic built-in superuser with 'bin/elasticsearch-reset-password -u elastic'."); - builder.append(System.lineSeparator()); - builder.append(System.lineSeparator()); - builder.append("Generate an enrollment token for Kibana instances with 'bin/elasticsearch-create-enrollment-token -s kibana'."); - builder.append(System.lineSeparator()); - builder.append(System.lineSeparator()); - builder.append("Generate an enrollment token for Elasticsearch nodes with 'bin/elasticsearch-create-enrollment-token -s node'."); + builder.append(horizontalBorderLine.repeat(horizontalBorderLength)); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); - builder.append("--------------------------------------------------------------------------------------------------------------"); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); - out.println(builder); + + console.printStream().println(builder); } interface OnNodeStartedListener { From 7efce30ed25db3095c9a33564cfb5ea4e0e02a08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Tue, 25 Jan 2022 10:17:29 +0100 Subject: [PATCH 005/100] [Transform] Make it possible to clear retention policy on an existing transform (#82703) --- .../transforms/NullRetentionPolicyConfig.java | 61 +++++++++++++++++++ .../transforms/TransformConfigUpdate.java | 20 ++++-- .../AbstractSerializingTransformTestCase.java | 8 +++ ...tractWireSerializingTransformTestCase.java | 8 +++ .../TransformConfigUpdateTests.java | 48 +++++++++++++-- .../test/transform/transforms_update.yml | 45 ++++++++++++++ 6 files changed, 180 insertions(+), 10 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NullRetentionPolicyConfig.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NullRetentionPolicyConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NullRetentionPolicyConfig.java new file mode 100644 index 0000000000000..7210862bbf6d0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NullRetentionPolicyConfig.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.transform.transforms; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; + +import java.io.IOException; +import java.util.function.Consumer; + +/** + * {@link NullRetentionPolicyConfig} is the implementation of {@link RetentionPolicyConfig} used when the user explicitly sets the + * retention_policy to {@code null} in the _update request: + * + * POST _transform/some-transform/_update + * { + * "retention_policy": null + * } + * + * This is treated *differently* than simply omitting retention_policy from the request as it instructs the API to clear existing + * retention_policy from some-transform. + */ +public class NullRetentionPolicyConfig implements RetentionPolicyConfig { + + public static final ParseField NAME = new ParseField("null_retention_policy"); + public static final NullRetentionPolicyConfig INSTANCE = new NullRetentionPolicyConfig(); + + private NullRetentionPolicyConfig() {} + + @Override + public ActionRequestValidationException validate(ActionRequestValidationException validationException) { + throw new UnsupportedOperationException(); + } + + @Override + public void checkForDeprecations(String id, NamedXContentRegistry namedXContentRegistry, Consumer onDeprecation) { + throw new UnsupportedOperationException(); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java index cb221e15b0fa6..67160d9932596 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java @@ -63,11 +63,15 @@ public class TransformConfigUpdate implements Writeable { PARSER.declareString(optionalConstructorArg(), TransformField.DESCRIPTION); PARSER.declareObject(optionalConstructorArg(), (p, c) -> SettingsConfig.fromXContent(p, false), TransformField.SETTINGS); PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.mapOrdered(), TransformField.METADATA); - PARSER.declareNamedObject( - optionalConstructorArg(), - (p, c, n) -> p.namedObject(RetentionPolicyConfig.class, n, c), - TransformField.RETENTION_POLICY - ); + PARSER.declareObjectOrNull(optionalConstructorArg(), (p, c) -> { + XContentParser.Token token = p.nextToken(); + assert token == XContentParser.Token.FIELD_NAME; + String currentName = p.currentName(); + RetentionPolicyConfig namedObject = p.namedObject(RetentionPolicyConfig.class, currentName, c); + token = p.nextToken(); + assert token == XContentParser.Token.END_OBJECT; + return namedObject; + }, NullRetentionPolicyConfig.INSTANCE, 
TransformField.RETENTION_POLICY); } private final SourceConfig source; @@ -299,7 +303,11 @@ public TransformConfig apply(TransformConfig config) { builder.setMetadata(metadata); } if (retentionPolicyConfig != null) { - builder.setRetentionPolicyConfig(retentionPolicyConfig); + if (NullRetentionPolicyConfig.INSTANCE.equals(retentionPolicyConfig)) { + builder.setRetentionPolicyConfig(null); + } else { + builder.setRetentionPolicyConfig(retentionPolicyConfig); + } } builder.setVersion(Version.CURRENT); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/AbstractSerializingTransformTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/AbstractSerializingTransformTestCase.java index 4469ee74385f6..5237ef0925d8b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/AbstractSerializingTransformTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/AbstractSerializingTransformTestCase.java @@ -23,6 +23,7 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContent.Params; +import org.elasticsearch.xpack.core.transform.transforms.NullRetentionPolicyConfig; import org.elasticsearch.xpack.core.transform.transforms.RetentionPolicyConfig; import org.elasticsearch.xpack.core.transform.transforms.SyncConfig; import org.elasticsearch.xpack.core.transform.transforms.TimeRetentionPolicyConfig; @@ -69,6 +70,13 @@ public void registerNamedObjects() { TimeRetentionPolicyConfig::new ) ); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + RetentionPolicyConfig.class, + NullRetentionPolicyConfig.NAME.getPreferredName(), + in -> NullRetentionPolicyConfig.INSTANCE + ) + ); List namedXContents = searchModule.getNamedXContents(); namedXContents.add( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/AbstractWireSerializingTransformTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/AbstractWireSerializingTransformTestCase.java index 37c75f846f73b..55fbe5a8280bc 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/AbstractWireSerializingTransformTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/AbstractWireSerializingTransformTestCase.java @@ -19,6 +19,7 @@ import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.transform.TransformField; import org.elasticsearch.xpack.core.transform.TransformNamedXContentProvider; +import org.elasticsearch.xpack.core.transform.transforms.NullRetentionPolicyConfig; import org.elasticsearch.xpack.core.transform.transforms.RetentionPolicyConfig; import org.elasticsearch.xpack.core.transform.transforms.SyncConfig; import org.elasticsearch.xpack.core.transform.transforms.TimeRetentionPolicyConfig; @@ -52,6 +53,13 @@ public void registerNamedObjects() { TimeRetentionPolicyConfig::new ) ); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + RetentionPolicyConfig.class, + NullRetentionPolicyConfig.NAME.getPreferredName(), + in -> NullRetentionPolicyConfig.INSTANCE + ) + ); List namedXContents = searchModule.getNamedXContents(); namedXContents.addAll(new TransformNamedXContentProvider().getNamedXContentParsers()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java index 589f73a241ba3..84e788ce9cfed 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java @@ -31,6 +31,8 @@ import static org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests.randomSyncConfig; import static org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests.randomTransformConfig; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; public class TransformConfigUpdateTests extends AbstractWireSerializingTransformTestCase { @@ -43,7 +45,7 @@ public static TransformConfigUpdate randomTransformConfigUpdate() { randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000), randomBoolean() ? null : SettingsConfigTests.randomSettingsConfig(), randomBoolean() ? null : randomMetadata(), - randomBoolean() ? null : randomRetentionPolicyConfig() + randomBoolean() ? null : randomBoolean() ? randomRetentionPolicyConfig() : NullRetentionPolicyConfig.INSTANCE ); } @@ -144,6 +146,40 @@ public void testApply() { assertThat(updatedConfig.getVersion(), equalTo(Version.CURRENT)); } + public void testApplyRetentionPolicy() { + TransformConfig config = TransformConfigTests.randomTransformConfig(); + + RetentionPolicyConfig timeRetentionPolicyConfig = new TimeRetentionPolicyConfig("field", TimeValue.timeValueDays(1)); + TransformConfigUpdate setRetentionPolicy = new TransformConfigUpdate( + null, + null, + null, + null, + null, + null, + null, + timeRetentionPolicyConfig + ); + config = setRetentionPolicy.apply(config); + assertThat(config.getRetentionPolicyConfig(), is(equalTo(timeRetentionPolicyConfig))); + + TransformConfigUpdate clearRetentionPolicy = new TransformConfigUpdate( + null, + null, + null, + null, + null, + null, + null, + NullRetentionPolicyConfig.INSTANCE + ); + config = clearRetentionPolicy.apply(config); + assertThat(config.getRetentionPolicyConfig(), is(nullValue())); + + config = setRetentionPolicy.apply(config); + assertThat(config.getRetentionPolicyConfig(), is(equalTo(timeRetentionPolicyConfig))); + } + public void testApplySettings() { TransformConfig config = new TransformConfig( "time-transform", @@ -354,9 +390,13 @@ private void toXContent(TransformConfigUpdate update, XContentBuilder builder) t builder.field(TransformField.METADATA.getPreferredName(), update.getMetadata()); } if (update.getRetentionPolicyConfig() != null) { - builder.startObject(TransformField.RETENTION_POLICY.getPreferredName()); - builder.field(update.getRetentionPolicyConfig().getWriteableName(), update.getRetentionPolicyConfig()); - builder.endObject(); + if (NullRetentionPolicyConfig.INSTANCE.equals(update.getRetentionPolicyConfig())) { + builder.nullField(TransformField.RETENTION_POLICY.getPreferredName()); + } else { + builder.startObject(TransformField.RETENTION_POLICY.getPreferredName()); + builder.field(update.getRetentionPolicyConfig().getWriteableName(), update.getRetentionPolicyConfig()); + builder.endObject(); + } } builder.endObject(); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/transforms_update.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/transforms_update.yml index 2ef844cdc1d90..c246f8273e6c3 100644 --- 
a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/transforms_update.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/transforms_update.yml
@@ -206,6 +206,51 @@ setup:
   - match: { transforms.0.sync.time.delay: "120m" }
   - match: { transforms.0.frequency: "5s" }

+---
+"Test update retention policy":
+  - do:
+      transform.get_transform:
+        transform_id: "updating-airline-transform"
+  - match: { count: 1 }
+  - match: { transforms.0.id: "updating-airline-transform" }
+  - match: { transforms.0.retention_policy: null }
+
+  - do:
+      transform.update_transform:
+        transform_id: "updating-airline-transform"
+        body: >
+          {
+            "retention_policy": {
+              "time": {
+                "field": "time",
+                "max_age": "24h"
+              }
+            }
+          }
+
+  - do:
+      transform.get_transform:
+        transform_id: "updating-airline-transform"
+  - match: { count: 1 }
+  - match: { transforms.0.id: "updating-airline-transform" }
+  - match: { transforms.0.retention_policy.time.field: "time" }
+  - match: { transforms.0.retention_policy.time.max_age: "24h" }
+
+  - do:
+      transform.update_transform:
+        transform_id: "updating-airline-transform"
+        body: >
+          {
+            "retention_policy": null
+          }
+
+  - do:
+      transform.get_transform:
+        transform_id: "updating-airline-transform"
+  - match: { count: 1 }
+  - match: { transforms.0.id: "updating-airline-transform" }
+  - match: { transforms.0.retention_policy: null }
+
 ---
 "Test transform where dest is included in source":
   - do:

From 2715db810359076c3202977404bad11663566076 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?=
Date: Tue, 25 Jan 2022 10:17:49 +0100
Subject: [PATCH 006/100] [DOCS] Updates Painless transform examples (#82802)

---
 docs/reference/transform/painless-examples.asciidoc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/reference/transform/painless-examples.asciidoc b/docs/reference/transform/painless-examples.asciidoc
index 31883880cb94c..8b7e73f529c25 100644
--- a/docs/reference/transform/painless-examples.asciidoc
+++ b/docs/reference/transform/painless-examples.asciidoc
@@ -573,11 +573,11 @@ POST _transform/_preview
           all_docs.add(span);
         }
       }
-      all_docs.sort((HashMap o1, HashMap o2)->o1['@timestamp'].millis.compareTo(o2['@timestamp'].millis));
+      all_docs.sort((HashMap o1, HashMap o2)->o1['@timestamp'].toEpochMilli().compareTo(o2['@timestamp'].toEpochMilli()));
       def size = all_docs.size();
       def min_time = all_docs[0]['@timestamp'];
       def max_time = all_docs[size-1]['@timestamp'];
-      def duration = max_time.millis - min_time.millis;
+      def duration = max_time.toEpochMilli() - min_time.toEpochMilli();
       def entry_page = all_docs[0]['url'];
       def exit_path = all_docs[size-1]['url'];
       def first_referrer = all_docs[0]['referrer'];
@@ -619,7 +619,7 @@ The API call results in a similar response:
     {
       "num_pages_viewed" : 2.0,
       "session_details" : {
-        "duration" : 131374,
+        "duration" : 100300001,
         "first_referrer" : "https://www.bing.com/",
         "entry_page" : "http://www.leroymerlin.fr/v3/p/produits/materiaux-menuiserie/porte-coulissante-porte-interieure-escalier-et-rambarde/barriere-de-securite-l1308218463",
         "first_time" : "2017-01-10T21:22:52.982Z",
@@ -632,7 +632,7 @@ The API call results in a similar response:
     {
       "num_pages_viewed" : 10.0,
       "session_details" : {
-        "duration" : 343112,
+        "duration" : 343100405,
         "first_referrer" : "https://www.google.fr/",
         "entry_page" : "http://www.leroymerlin.fr/",
         "first_time" : "2017-01-10T16:57:39.937Z",

From f412ce486c06fa2fe368da3285d8e2e7f85f7536 Mon Sep 17 00:00:00 2001
From: Hendrik Muhs
Date: Tue, 25 Jan 2022 10:22:33 +0100
Subject: [PATCH 007/100] [Transform][Docs] Delete advice about group_by order (#82886)

Since 7.15, transforms automatically optimize the grouping part of the query.
We can therefore delete the group_by order advice from the transform-at-scale
documentation, which makes it simpler.
---
 .../transform/transforms-at-scale.asciidoc   | 22 +++++++-------------
 1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/docs/reference/transform/transforms-at-scale.asciidoc b/docs/reference/transform/transforms-at-scale.asciidoc
index 0c9063b5dc373..f4ad4fb112dce 100644
--- a/docs/reference/transform/transforms-at-scale.asciidoc
+++ b/docs/reference/transform/transforms-at-scale.asciidoc
@@ -160,20 +160,14 @@ we do not recommend using a runtime field as the time field that synchronizes a

 [discrete]
 [[index-sorting-group-by-ordering]]
-== 8. Use index sorting and `group_by` ordering (search, process)
-
-If you use more than one `group_by` field in your {transform}, then the order of
-the fields in conjunction with the use of <> may
-improve runtime.
-
-Index sorting enables you to store documents on disk in a specific order which
-can improve query efficiency. The ideal sorting logic depends on your use case,
-but the rule of thumb may be to sort the fields in descending order (high to low
-cardinality) starting with the time-based fields. Then put the time-based
-components first in the `group_by` if you have any, and then apply the same
-order to your `group_by` fields as configured for index sorting. Index sorting
-can be defined only once at index creation. If you don't already have index
-sorting on the index that you want to use as a source, consider reindexing it to
+== 8. Use index sorting (search, process)
+
+Index sorting enables you to store documents on disk in a specific order which
+can improve query efficiency. The ideal sorting logic depends on your use case,
+but the rule of thumb may be to sort the fields in descending order (high to low
+cardinality) starting with the time-based fields. Index sorting
+can be defined only once at index creation. If you don't already have index
+sorting on the index that you want to use as a source, consider reindexing it to
 a new, sorted index.

From f416b67b8789f737c0c152fc2e5c19aa9d4b4272 Mon Sep 17 00:00:00 2001
From: Przemyslaw Gomulka
Date: Tue, 25 Jan 2022 10:59:49 +0100
Subject: [PATCH 008/100] Do not use x-opaque-id for deduplicating elastic
 originating requests (#82855)

Deprecated log messages originating from any Elastic product request
should not be deduplicated using x-opaque-id. If present, the value of
X-elastic-product-origin will be used as part of the throttling key instead.
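To sketch the resulting behavior (a simplified, self-contained model, not
the code from this change; the real logic lives in RateLimitingFilter#getKey
in the diff below, which also honors the useXOpaqueId toggle):

    // Hypothetical helper mirroring the throttling-key rule described above.
    static String throttlingKey(String key, String xOpaqueId, String productOrigin) {
        if (productOrigin != null && productOrigin.isEmpty() == false) {
            // Elastic-originating requests (e.g. Kibana, Beats) deduplicate per
            // product, so unique x-opaque-id values cannot defeat the rate limit.
            return productOrigin + key;
        }
        // User-originating requests still deduplicate on x-opaque-id plus key.
        return xOpaqueId + key;
    }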
relates #82810
---
 docs/changelog/82855.yaml                     |  5 ++
 .../common/logging/RateLimitingFilter.java    | 11 +++-
 .../logging/RateLimitingFilterTests.java      | 50 ++++++++++++++-----
 3 files changed, 52 insertions(+), 14 deletions(-)
 create mode 100644 docs/changelog/82855.yaml

diff --git a/docs/changelog/82855.yaml b/docs/changelog/82855.yaml
new file mode 100644
index 0000000000000..6d5fc07abd889
--- /dev/null
+++ b/docs/changelog/82855.yaml
@@ -0,0 +1,5 @@
+pr: 82855
+summary: Do not use x-opaque-id for deduplicating elastic originating requests
+area: Infra/Logging
+type: enhancement
+issues: []

diff --git a/server/src/main/java/org/elasticsearch/common/logging/RateLimitingFilter.java b/server/src/main/java/org/elasticsearch/common/logging/RateLimitingFilter.java
index cdcf629b40e95..051cf6a20df48 100644
--- a/server/src/main/java/org/elasticsearch/common/logging/RateLimitingFilter.java
+++ b/server/src/main/java/org/elasticsearch/common/logging/RateLimitingFilter.java
@@ -19,12 +19,14 @@
 import org.apache.logging.log4j.core.config.plugins.PluginFactory;
 import org.apache.logging.log4j.core.filter.AbstractFilter;
 import org.apache.logging.log4j.message.Message;
+import org.elasticsearch.common.Strings;

 import java.util.Collections;
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;

+import static org.elasticsearch.common.logging.DeprecatedMessage.ELASTIC_ORIGIN_FIELD_NAME;
 import static org.elasticsearch.common.logging.DeprecatedMessage.KEY_FIELD_NAME;
 import static org.elasticsearch.common.logging.DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME;

@@ -34,13 +36,14 @@
 * passed by a user on a HTTP header.
 * This filter works by using a lruKeyCache - a set of keys which prevents a second message with the same key to be logged.
 * The lruKeyCache has a size limited to 128, which when breached will remove the oldest entries.
- *
+ * <p>
* It is possible to disable use of `x-opaque-id` as a key with {@link RateLimitingFilter#setUseXOpaqueId(boolean) } + * * @see Log4j2 Filters */ @Plugin(name = "RateLimitingFilter", category = Node.CATEGORY, elementType = Filter.ELEMENT_TYPE) public class RateLimitingFilter extends AbstractFilter { - + // a flag to disable/enable use of xOpaqueId controlled by changing cluster setting private volatile boolean useXOpaqueId = true; private final Set lruKeyCache = Collections.newSetFromMap(Collections.synchronizedMap(new LinkedHashMap<>() { @@ -76,6 +79,10 @@ public Result filter(Message message) { private String getKey(ESLogMessage esLogMessage) { final String key = esLogMessage.get(KEY_FIELD_NAME); + final String productOrigin = esLogMessage.get(ELASTIC_ORIGIN_FIELD_NAME); + if (Strings.isNullOrEmpty(productOrigin) == false) { + return productOrigin + key; + } if (useXOpaqueId) { String xOpaqueId = esLogMessage.get(X_OPAQUE_ID_FIELD_NAME); return xOpaqueId + key; diff --git a/server/src/test/java/org/elasticsearch/common/logging/RateLimitingFilterTests.java b/server/src/test/java/org/elasticsearch/common/logging/RateLimitingFilterTests.java index 61e6b3453ea1b..b3d9001c0b2f4 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/RateLimitingFilterTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/RateLimitingFilterTests.java @@ -84,20 +84,20 @@ public void testMessagesAreRateLimitedByXOpaqueId() { public void testMessagesAreRateLimitedByKeyAndXOpaqueId() { // Fill up the cache for (int i = 0; i < 128; i++) { - Message message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key " + i, "opaque-id " + i, "productName", "msg " + i); + Message message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key " + i, "opaque-id " + i, null, "msg " + i); assertThat("Expected key" + i + " to be accepted", filter.filter(message), equalTo(Result.ACCEPT)); } // Should be rate-limited because it's still in the cache - Message message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", "productName", "msg 0"); + Message message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", null, "msg 0"); assertThat(filter.filter(message), equalTo(Result.DENY)); // Filter a message with a previously unseen key, in order to evict key0 as it's the oldest - message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 129", "opaque-id 129", "productName", "msg 129"); + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 129", "opaque-id 129", null, "msg 129"); assertThat(filter.filter(message), equalTo(Result.ACCEPT)); // Should be allowed because key 0 was evicted from the cache - message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", "productName", "msg 0"); + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", null, "msg 0"); assertThat(filter.filter(message), equalTo(Result.ACCEPT)); } @@ -106,18 +106,18 @@ public void testMessagesAreRateLimitedByKeyAndXOpaqueId() { * independently and checking that a message is not filtered. 
*/ public void testVariationsInKeyAndXOpaqueId() { - Message message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", "productName", "msg 0"); + Message message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", null, "msg 0"); assertThat(filter.filter(message), equalTo(Result.ACCEPT)); - message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", "productName", "msg 0"); + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", null, "msg 0"); // Rejected because the "x-opaque-id" and "key" values are the same as above assertThat(filter.filter(message), equalTo(Result.DENY)); - message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 1", "opaque-id 0", "productName", "msg 0"); + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 1", "opaque-id 0", null, "msg 0"); // Accepted because the "key" value is different assertThat(filter.filter(message), equalTo(Result.ACCEPT)); - message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 1", "productName", "msg 0"); + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 1", null, "msg 0"); // Accepted because the "x-opaque-id" value is different assertThat(filter.filter(message), equalTo(Result.ACCEPT)); } @@ -154,19 +154,45 @@ public void testMessagesXOpaqueIsIgnoredWhenDisabled() { filter.start(); // Should NOT be rate-limited because it's not in the cache - Message message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", "productName", "msg 0"); + Message message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", null, "msg 0"); assertThat(filter.filter(message), equalTo(Result.ACCEPT)); // Should be rate-limited because it was just added to the cache - message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", "productName", "msg 0"); + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 0", null, "msg 0"); assertThat(filter.filter(message), equalTo(Result.DENY)); // Should be rate-limited because X-Opaque-Id is not used - message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 1", "productName", "msg 0"); + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 0", "opaque-id 1", null, "msg 0"); assertThat(filter.filter(message), equalTo(Result.DENY)); // Should NOT be rate-limited because "key 1" it not in the cache - message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 1", "opaque-id 1", "productName", "msg 0"); + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key 1", "opaque-id 1", null, "msg 0"); + assertThat(filter.filter(message), equalTo(Result.ACCEPT)); + } + + public void testXOpaqueIdNotBeingUsedFromElasticOriginatingRequests() { + RateLimitingFilter filter = new RateLimitingFilter(); + filter.setUseXOpaqueId(true); + filter.start(); + + // Should NOT be rate-limited because it's not in the cache + Message message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key", "opaque-id 0", "kibana", "msg 0"); + assertThat(filter.filter(message), equalTo(Result.ACCEPT)); + + // Should be rate-limited even though the x-opaque-id is unique because it originates from kibana + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key", "opaque-id 1", "kibana", "msg 0"); + assertThat(filter.filter(message), equalTo(Result.DENY)); + + // Should not be rate-limited - it is the first request from beats. 
(x-opaque-id ignored as it originates from elastic) + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key", "opaque-id 0", "beats", "msg 0"); + assertThat(filter.filter(message), equalTo(Result.ACCEPT)); + + // second request from beats (elastic originating), should be rate-limited + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key", "opaque-id 1", "beats", "msg 0"); + assertThat(filter.filter(message), equalTo(Result.DENY)); + + // request from beats (elastic originating), but with a different key- should not be rate-limited + message = DeprecatedMessage.of(DeprecationCategory.OTHER, "key2", "opaque-id 1", "beats", "msg 1"); assertThat(filter.filter(message), equalTo(Result.ACCEPT)); } } From 65d27adb5566db44b2374979c343b3597c6f11e8 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 25 Jan 2022 11:47:26 +0100 Subject: [PATCH 009/100] Update the signature of the `submitStateUpdateTask` across the codebase. (#82942) This change updates submitStateUpdateTask signature to enforce task implements ClusterStateTaskListener and remove extra listener argument. This reduces the number of arguments and simplifies the call sites as all tasks are listeners already. --- .../indices/create/AutoCreateAction.java | 2 +- .../rollover/TransportRolloverAction.java | 2 +- .../cluster/ClusterStateTaskExecutor.java | 4 --- .../cluster/LocalMasterServiceTask.java | 3 +- .../action/shard/ShardStateAction.java | 6 ++-- .../cluster/coordination/Coordinator.java | 3 +- .../cluster/coordination/JoinHelper.java | 13 +++----- .../metadata/MetadataMappingService.java | 3 +- .../cluster/service/ClusterService.java | 18 ++++------ .../cluster/service/MasterService.java | 33 ++++++++----------- .../snapshots/SnapshotsService.java | 3 +- .../cluster/service/MasterServiceTests.java | 25 +++++++------- .../xpack/ilm/IndexLifecycleRunner.java | 2 +- .../xpack/ilm/IndexLifecycleRunnerTests.java | 11 +++---- .../xpack/ilm/IndexLifecycleServiceTests.java | 2 +- 15 files changed, 51 insertions(+), 79 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index c45e7cc2f9fdb..dc04344e7628d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -156,7 +156,7 @@ protected void masterOperation( } }, finalListener::onFailure); CreateIndexTask clusterTask = new CreateIndexTask(request, listener, indexNameRef); - clusterService.submitStateUpdateTask("auto create [" + request.index() + "]", clusterTask, clusterTask, executor, clusterTask); + clusterService.submitStateUpdateTask("auto create [" + request.index() + "]", clusterTask, clusterTask, executor); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index e2a048442ff55..aadbd728385d1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -181,7 +181,7 @@ protected void masterOperation( String source = "rollover_index source [" + trialRolloverIndexName + "] to target [" + trialRolloverIndexName + "]"; RolloverTask rolloverTask = 
new RolloverTask(rolloverRequest, statsResponse, trialRolloverResponse, listener); ClusterStateTaskConfig config = ClusterStateTaskConfig.build(Priority.NORMAL, rolloverRequest.masterNodeTimeout()); - clusterService.submitStateUpdateTask(source, rolloverTask, config, rolloverTaskExecutor, rolloverTask); + clusterService.submitStateUpdateTask(source, rolloverTask, config, rolloverTaskExecutor); } else { // conditions not met listener.onResponse(trialRolloverResponse); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index a425834b55632..835665a4b3960 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -106,10 +106,6 @@ private Builder result(T task, TaskResult executionResult) { public ClusterTasksResult build(ClusterState resultingState) { return new ClusterTasksResult<>(resultingState, executionResults); } - - ClusterTasksResult build(ClusterTasksResult result, ClusterState previousState) { - return new ClusterTasksResult<>(result.resultingState == null ? previousState : result.resultingState, executionResults); - } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java b/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java index b5f404a67b679..fff3894c16d4a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java @@ -52,8 +52,7 @@ public ClusterTasksResult execute(ClusterState currentSt LocalMasterServiceTask.this.execute(currentState); return ClusterTasksResult.builder().successes(tasks).build(currentState); } - }, - this + } ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 76bc3a22da391..ac92b489ebb48 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -300,8 +300,7 @@ public void messageReceived(FailedShardEntry request, TransportChannel channel, TASK_SOURCE, update, ClusterStateTaskConfig.build(Priority.HIGH), - shardFailedClusterStateTaskExecutor, - update + shardFailedClusterStateTaskExecutor ); } } @@ -607,8 +606,7 @@ public void messageReceived(StartedShardEntry request, TransportChannel channel, "shard-started " + request, update, ClusterStateTaskConfig.build(Priority.URGENT), - shardStartedClusterStateTaskExecutor, - update + shardStartedClusterStateTaskExecutor ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 2be50a6ca6177..3c6ec286de6db 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -308,8 +308,7 @@ private void removeNode(DiscoveryNode discoveryNode, String reason) { "node-left", task, ClusterStateTaskConfig.build(Priority.IMMEDIATE), - nodeRemovalExecutor, - task + nodeRemovalExecutor ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java 
b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index 8b79c553f2d52..7ee8a10fd7c37 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -48,14 +48,12 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; -import java.util.function.Consumer; import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -403,7 +401,7 @@ public void handleJoinRequest(DiscoveryNode sender, ActionListener joinLis joinListener ); assert joinTaskExecutor != null; - masterService.submitStateUpdateTask("node-join", task, ClusterStateTaskConfig.build(Priority.URGENT), joinTaskExecutor, task); + masterService.submitStateUpdateTask("node-join", task, ClusterStateTaskConfig.build(Priority.URGENT), joinTaskExecutor); } @Override @@ -456,18 +454,17 @@ public void close(Mode newMode) { assert closed == false : "CandidateJoinAccumulator closed"; closed = true; if (newMode == Mode.LEADER) { - final Map pendingAsTasks = new LinkedHashMap<>(); - final Consumer pendingTaskAdder = task -> pendingAsTasks.put(task, task); + final List pendingAsTasks = new ArrayList<>(); joinRequestAccumulator.forEach( - (node, listener) -> pendingTaskAdder.accept( + (node, listener) -> pendingAsTasks.add( new JoinTaskExecutor.Task(node, joinReasonService.getJoinReason(node, Mode.CANDIDATE), listener) ) ); final String stateUpdateSource = "elected-as-master ([" + pendingAsTasks.size() + "] nodes joined)"; - pendingTaskAdder.accept(JoinTaskExecutor.newBecomeMasterTask()); - pendingTaskAdder.accept(JoinTaskExecutor.newFinishElectionTask()); + pendingAsTasks.add(JoinTaskExecutor.newBecomeMasterTask()); + pendingAsTasks.add(JoinTaskExecutor.newFinishElectionTask()); joinTaskExecutor = joinTaskExecutorGenerator.get(); masterService.submitStateUpdateTasks( stateUpdateSource, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java index ad30d1c03fba5..59313b4e71021 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java @@ -253,8 +253,7 @@ public void putMapping(final PutMappingClusterStateUpdateRequest request, final "put-mapping " + Strings.arrayToCommaDelimitedString(request.indices()), task, ClusterStateTaskConfig.build(Priority.HIGH, request.masterNodeTimeout()), - putMappingExecutor, - task + putMappingExecutor ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index a74c069fcc7ea..9f97e63c14a3b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -28,7 +28,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Collections; +import java.util.List; public class ClusterService extends AbstractLifecycleComponent { private final MasterService masterService; @@ -223,8 
+223,7 @@ public final String getNodeName() { } /** - * Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, - * ClusterStateTaskExecutor, ClusterStateTaskListener)}. + * Submits a cluster state update task * @param source the source of the cluster state update task * @param updateTask the full context for the cluster state update * @param executor the executor to use for the submitted task. @@ -234,7 +233,7 @@ public void submit T updateTask, ClusterStateTaskExecutor executor ) { - submitStateUpdateTask(source, updateTask, updateTask, executor, updateTask); + submitStateUpdateTask(source, updateTask, updateTask, executor); } /** @@ -246,24 +245,21 @@ public void submit * tasks will all be executed on the executor in a single batch * * @param source the source of the cluster state update task - * @param task the state needed for the cluster state update task + * @param task the state and the callback needed for the cluster state update task * @param config the cluster state update task configuration * @param executor the cluster state update task executor; tasks * that share the same executor will be executed * batches on this executor - * @param listener callback after the cluster state update task - * completes * @param the type of the cluster state update task state * */ - public void submitStateUpdateTask( + public void submitStateUpdateTask( String source, T task, ClusterStateTaskConfig config, - ClusterStateTaskExecutor executor, - ClusterStateTaskListener listener + ClusterStateTaskExecutor executor ) { - masterService.submitStateUpdateTasks(source, Collections.singletonMap(task, listener), config, executor); + masterService.submitStateUpdateTasks(source, List.of(task), config, executor); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index e7fd0927eb8a8..f664b0514a0c8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -45,7 +45,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; -import java.util.Collections; +import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Map; @@ -426,9 +426,8 @@ public Builder incrementVersion(ClusterState clusterState) { } /** - * Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, - * ClusterStateTaskExecutor, ClusterStateTaskListener)}, submitted updates will not be batched. 
- * @param source the source of the cluster state update task + * Submits a cluster state update task + * @param source the source of the cluster state update task * @param updateTask the full context for the cluster state update * @param executor * @@ -438,7 +437,7 @@ public void submit T updateTask, ClusterStateTaskExecutor executor ) { - submitStateUpdateTask(source, updateTask, updateTask, executor, updateTask); + submitStateUpdateTask(source, updateTask, updateTask, executor); } /** @@ -450,24 +449,21 @@ public void submit * tasks will all be executed on the executor in a single batch * * @param source the source of the cluster state update task - * @param task the state needed for the cluster state update task + * @param task the state and the callback needed for the cluster state update task * @param config the cluster state update task configuration * @param executor the cluster state update task executor; tasks * that share the same executor will be executed * batches on this executor - * @param listener callback after the cluster state update task - * completes * @param the type of the cluster state update task state * */ - public void submitStateUpdateTask( + public void submitStateUpdateTask( String source, T task, ClusterStateTaskConfig config, - ClusterStateTaskExecutor executor, - ClusterStateTaskListener listener + ClusterStateTaskExecutor executor ) { - submitStateUpdateTasks(source, Collections.singletonMap(task, listener), config, executor); + submitStateUpdateTasks(source, List.of(task), config, executor); } /** @@ -896,7 +892,7 @@ void onNoLongerMaster() { * potentially with more tasks of the same executor. * * @param source the source of the cluster state update task - * @param tasks a map of update tasks and their corresponding listeners + * @param tasks a collection of update tasks and their corresponding listeners * @param config the cluster state update task configuration * @param executor the cluster state update task executor; tasks * that share the same executor will be executed @@ -904,9 +900,9 @@ void onNoLongerMaster() { * @param the type of the cluster state update task state * */ - public void submitStateUpdateTasks( + public void submitStateUpdateTasks( final String source, - final Map tasks, + final Collection tasks, final ClusterStateTaskConfig config, final ClusterStateTaskExecutor executor ) { @@ -918,10 +914,9 @@ public void submitStateUpdateTasks( try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { threadContext.markAsSystemContext(); - List safeTasks = tasks.entrySet() - .stream() - .map(e -> taskBatcher.new UpdateTask(config.priority(), source, e.getKey(), safe(e.getValue(), supplier), executor)) - .collect(Collectors.toList()); + List safeTasks = tasks.stream() + .map(e -> taskBatcher.new UpdateTask(config.priority(), source, e, safe(e, supplier), executor)) + .toList(); taskBatcher.submitTasks(safeTasks, config.timeout()); } catch (EsRejectedExecutionException e) { // ignore cases where we are shutting down..., there is really nothing interesting diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index bd6de5a89a149..c8b3273897683 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -3418,8 +3418,7 @@ private void innerUpdateSnapshotState( "update snapshot state", update, 
ClusterStateTaskConfig.build(Priority.NORMAL), - SHARD_STATE_EXECUTOR, - update + SHARD_STATE_EXECUTOR ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index c0daaeb9153c4..66cb1fac6bdf1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -65,11 +65,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static java.util.stream.Collectors.toMap; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -275,11 +273,14 @@ public void onFailure(Exception e) {} "testClusterStateTaskListenerThrowingExceptionIsOkay", update, ClusterStateTaskConfig.build(Priority.NORMAL), - new ClusterStateTaskExecutor() { + new ClusterStateTaskExecutor<>() { @Override - public ClusterTasksResult execute(ClusterState currentState, List tasks) { + public ClusterTasksResult execute( + ClusterState currentState, + List tasks + ) { ClusterState newClusterState = ClusterState.builder(currentState).build(); - return ClusterTasksResult.builder().successes(tasks).build(newClusterState); + return ClusterTasksResult.builder().successes(tasks).build(newClusterState); } @Override @@ -287,8 +288,7 @@ public void clusterStatePublished(ClusterStatePublicationEvent clusterStatePubli published.set(true); latch.countDown(); } - }, - update + } ); latch.await(); @@ -602,18 +602,16 @@ public void clusterStatePublished(ClusterStatePublicationEvent clusterPublicatio var executor = assignment.v1(); submittedTasks.addAndGet(tasks.size()); if (tasks.size() == 1) { - var update = tasks.iterator().next(); masterService.submitStateUpdateTask( threadName, - update, + tasks.iterator().next(), ClusterStateTaskConfig.build(randomFrom(Priority.values())), - executor, - update + executor ); } else { masterService.submitStateUpdateTasks( threadName, - tasks.stream().collect(toMap(Function.identity(), Function.identity())), + tasks, ClusterStateTaskConfig.build(randomFrom(Priority.values())), executor ); @@ -685,8 +683,7 @@ public void onFailure(Exception e) {} (currentState, tasks) -> { ClusterState newClusterState = ClusterState.builder(currentState).build(); return ClusterTasksResult.builder().successes(tasks).build(newClusterState); - }, - update + } ); latch.await(); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java index 84d83dbd3e0c0..cb10c33071523 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java @@ -684,7 +684,7 @@ private void submitUnlessAlreadyQueued(String source, IndexLifecycleClusterState busyIndices.remove(dedupKey); assert removed : "tried to unregister unknown task [" + task + "]"; })); - clusterService.submitStateUpdateTask(source, task, ILM_TASK_CONFIG, ILM_TASK_EXECUTOR, task); + clusterService.submitStateUpdateTask(source, task, ILM_TASK_CONFIG, ILM_TASK_EXECUTOR); } else { 
logger.trace("skipped redundant execution of [{}]", source); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java index 4d5aa0106cd13..eca475824909f 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java @@ -175,7 +175,7 @@ public void testRunPolicyPhaseCompleteWithMoreStepsPolicyStep() { runner.runPolicyAfterStateChange(policyName, indexMetadata); runner.runPeriodicStep(policyName, Metadata.builder().put(indexMetadata, true).build(), indexMetadata); - Mockito.verify(clusterService, times(1)).submitStateUpdateTask(anyString(), any(), any(), any(), any()); + Mockito.verify(clusterService, times(1)).submitStateUpdateTask(anyString(), any(), any(), any()); } public void testRunPolicyErrorStep() { @@ -656,8 +656,7 @@ public void testRunPolicyClusterStateActionStep() { ilm-execute-cluster-state-steps [{"phase":"phase","action":"action","name":"cluster_state_action_step"} => null]"""), Mockito.argThat(taskMatcher), eq(IndexLifecycleRunner.ILM_TASK_CONFIG), - any(), - Mockito.argThat(taskMatcher) + any() ); Mockito.verifyNoMoreInteractions(clusterService); } @@ -684,8 +683,7 @@ public void testRunPolicyClusterStateWaitStep() { ilm-execute-cluster-state-steps [{"phase":"phase","action":"action","name":"cluster_state_action_step"} => null]"""), Mockito.argThat(taskMatcher), eq(IndexLifecycleRunner.ILM_TASK_CONFIG), - any(), - Mockito.argThat(taskMatcher) + any() ); Mockito.verifyNoMoreInteractions(clusterService); } @@ -766,8 +764,7 @@ public void testRunPolicyThatDoesntExist() { Mockito.eq("ilm-set-step-info {policy [cluster_state_action_policy], index [my_index], currentStep [null]}"), Mockito.argThat(taskMatcher), eq(IndexLifecycleRunner.ILM_TASK_CONFIG), - any(), - Mockito.argThat(taskMatcher) + any() ); Mockito.verifyNoMoreInteractions(clusterService); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java index 77602c5381016..79623d0abf662 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java @@ -338,7 +338,7 @@ public void testRequestedStopOnSafeAction() { doAnswer(invocationOnMock -> { ranPolicy.set(true); throw new AssertionError("invalid invocation"); - }).when(clusterService).submitStateUpdateTask(anyString(), any(), eq(IndexLifecycleRunner.ILM_TASK_CONFIG), any(), any()); + }).when(clusterService).submitStateUpdateTask(anyString(), any(), eq(IndexLifecycleRunner.ILM_TASK_CONFIG), any()); doAnswer(invocationOnMock -> { OperationModeUpdateTask task = (OperationModeUpdateTask) invocationOnMock.getArguments()[1]; From 18e40fcbd5a909314f949e018d0dc38d64f97971 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 25 Jan 2022 11:52:44 +0100 Subject: [PATCH 010/100] Make IndexNameExpressionResolver more Static (#82940) Lots of things can be made static here. I found these while investigating some index-math related issues and think making more things static here that are stateless+static in practice makes the code much easier to follow and might also offer a slight performance gain in expression resolving. 
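For example, call sites that resolve date math no longer need a resolver
instance (an illustrative before/after sketch; the index expression is a
hypothetical value, not taken from the diff):

    // Before: resolution went through an injected IndexNameExpressionResolver.
    String index = indexNameExpressionResolver.resolveDateMathExpression("<logs-{now/d}>");

    // After: the stateless helper is static, so no instance is required.
    String index = IndexNameExpressionResolver.resolveDateMathExpression("<logs-{now/d}>");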
--- .../routing/AliasResolveRoutingIT.java | 2 +- .../DedicatedClusterSnapshotRestoreIT.java | 6 +- .../alias/TransportIndicesAliasesAction.java | 2 +- .../indices/create/AutoCreateAction.java | 2 +- .../create/TransportCreateIndexAction.java | 2 +- .../rollover/MetadataRolloverService.java | 13 +- .../indices/shrink/TransportResizeAction.java | 4 +- .../metadata/IndexAbstractionResolver.java | 2 +- .../metadata/IndexNameExpressionResolver.java | 118 ++++------- .../org/elasticsearch/index/IndexService.java | 4 +- .../elasticsearch/ingest/IngestService.java | 4 +- .../snapshots/SnapshotsService.java | 4 +- .../MetadataRolloverServiceTests.java | 27 +-- .../TransportRolloverActionTests.java | 2 - .../DateMathExpressionResolverTests.java | 32 +-- .../IndexNameExpressionResolverTests.java | 63 +----- .../MetadataCreateIndexServiceTests.java | 13 +- .../WildcardExpressionResolverTests.java | 186 +++++++++++++----- .../metadata/DataStreamTestHelper.java | 11 +- .../core/ilm/GenerateSnapshotNameStep.java | 5 +- .../DateMathExpressionIntegTests.java | 4 +- .../authz/IndicesAndAliasesResolver.java | 2 +- .../authz/IndicesAndAliasesResolverTests.java | 9 +- 23 files changed, 225 insertions(+), 292 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java index 0de71d3f8f9c3..fc0636fb82ec1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java @@ -205,7 +205,7 @@ public void testResolveSearchRouting() { ); assertThat( - indexNameExpressionResolver.resolveSearchRoutingAllIndices(state.metadata(), "0,1,2,tw , ltw , lw"), + IndexNameExpressionResolver.resolveSearchRoutingAllIndices(state.metadata(), "0,1,2,tw , ltw , lw"), equalTo( newMap( "test1", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index ec4fd903b047c..03afc00224a4e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.node.Node; @@ -685,7 +684,6 @@ public void testRestoreShrinkIndex() throws Exception { public void testSnapshotWithDateMath() { final String repo = "repo"; - final IndexNameExpressionResolver nameExpressionResolver = TestIndexNameExpressionResolver.newInstance(); final String snapshotName = ""; logger.info("--> creating repository"); @@ -695,11 +693,11 @@ public void testSnapshotWithDateMath() { .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); - final String expression1 = nameExpressionResolver.resolveDateMathExpression(snapshotName); + final String expression1 = IndexNameExpressionResolver.resolveDateMathExpression(snapshotName); logger.info("--> creating date 
math snapshot"); createFullSnapshot(repo, snapshotName); // snapshot could be taken before or after a day rollover - final String expression2 = nameExpressionResolver.resolveDateMathExpression(snapshotName); + final String expression2 = IndexNameExpressionResolver.resolveDateMathExpression(snapshotName); SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repo) .setSnapshots(Sets.newHashSet(expression1, expression2).toArray(Strings.EMPTY_ARRAY)) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 44478331629cf..e212cd16f6075 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -204,7 +204,7 @@ protected void masterOperation( switch (action.actionType()) { case ADD: for (String alias : concreteAliases(action, state.metadata(), index.getName())) { - String resolvedName = this.indexNameExpressionResolver.resolveDateMathExpression(alias, now); + String resolvedName = IndexNameExpressionResolver.resolveDateMathExpression(alias, now); finalActions.add( new AliasAction.Add( index.getName(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index dc04344e7628d..adabbd19f6fc1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -212,7 +212,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { indexNameRef.set(clusterState.metadata().dataStreams().get(request.index()).getIndices().get(0).getName()); return clusterState; } else { - String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index()); + String indexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index()); indexNameRef.set(indexName); if (isSystemIndex) { if (indexName.equals(request.index()) == false) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 62fa64c537b83..a7767a96a2cdb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -90,7 +90,7 @@ protected void masterOperation( } final long resolvedAt = System.currentTimeMillis(); - final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index(), resolvedAt); + final String indexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index(), resolvedAt); final SystemIndexDescriptor mainDescriptor = systemIndices.findMatchingDescriptor(indexName); final boolean isSystemIndex = mainDescriptor != null; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index 62d2a3141a977..4f08d6a50eda4 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -61,7 +61,6 @@ public class MetadataRolloverService { private final ThreadPool threadPool; private final MetadataCreateIndexService createIndexService; private final MetadataIndexAliasesService indexAliasesService; - private final IndexNameExpressionResolver indexNameExpressionResolver; private final SystemIndices systemIndices; @Inject @@ -69,13 +68,11 @@ public MetadataRolloverService( ThreadPool threadPool, MetadataCreateIndexService createIndexService, MetadataIndexAliasesService indexAliasesService, - IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices ) { this.threadPool = threadPool; this.createIndexService = createIndexService; this.indexAliasesService = indexAliasesService; - this.indexNameExpressionResolver = indexNameExpressionResolver; this.systemIndices = systemIndices; } @@ -163,10 +160,8 @@ private NameResolution resolveAliasRolloverNames(Metadata metadata, IndexAbstrac final String sourceProvidedName = writeIndex.getSettings() .get(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, writeIndex.getIndex().getName()); final String sourceIndexName = writeIndex.getIndex().getName(); - final String unresolvedName = (newIndexName != null) - ? newIndexName - : generateRolloverIndexName(sourceProvidedName, indexNameExpressionResolver); - final String rolloverIndexName = indexNameExpressionResolver.resolveDateMathExpression(unresolvedName); + final String unresolvedName = (newIndexName != null) ? newIndexName : generateRolloverIndexName(sourceProvidedName); + final String rolloverIndexName = IndexNameExpressionResolver.resolveDateMathExpression(unresolvedName); return new NameResolution(sourceIndexName, unresolvedName, rolloverIndexName); } @@ -293,8 +288,8 @@ private RolloverResult rolloverDataStream( return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), newState); } - static String generateRolloverIndexName(String sourceIndexName, IndexNameExpressionResolver indexNameExpressionResolver) { - String resolvedName = indexNameExpressionResolver.resolveDateMathExpression(sourceIndexName); + static String generateRolloverIndexName(String sourceIndexName) { + String resolvedName = IndexNameExpressionResolver.resolveDateMathExpression(sourceIndexName); final boolean isDateMath = sourceIndexName.equals(resolvedName) == false; if (INDEX_NAME_PATTERN.matcher(resolvedName).matches()) { int numberIndex = sourceIndexName.lastIndexOf("-"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index d85690e330f46..173a0f7b2bb9d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -116,8 +116,8 @@ protected void masterOperation( ) { // there is no need to fetch docs stats for split but we keep it simple and do it anyway for simplicity of the code - final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getSourceIndex()); - final String targetIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getTargetIndexRequest().index()); + final String sourceIndex = 
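Worth spelling out from the MetadataRolloverService hunk above: generateRolloverIndexName (now static) first resolves any date math in the provided name and then increments the trailing counter, zero-padded to six digits. A minimal sketch of just the counter convention, with the date math step omitted and the class name chosen for illustration:

    import java.util.Locale;

    class RolloverNameSketch {
        // Hypothetical stand-in for the counter logic: parse the number after
        // the last '-' and emit it incremented, zero-padded to six digits.
        static String nextRolloverName(String sourceIndexName) {
            int dash = sourceIndexName.lastIndexOf('-');
            if (dash < 0) {
                throw new IllegalArgumentException("index name [" + sourceIndexName + "] must end with '-' and a number");
            }
            int num = Integer.parseInt(sourceIndexName.substring(dash + 1));
            return sourceIndexName.substring(0, dash) + "-" + String.format(Locale.ROOT, "%06d", num + 1);
        }

        public static void main(String[] args) {
            System.out.println(nextRolloverName("index-name-1")); // index-name-000002
            System.out.println(nextRolloverName("index-name-2")); // index-name-000003
        }
    }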
IndexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getSourceIndex()); + final String targetIndex = IndexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getTargetIndexRequest().index()); final IndexMetadata sourceMetadata = state.metadata().index(sourceIndex); if (sourceMetadata == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java index 4fc009d61e82b..6b560213bfb85 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java @@ -76,7 +76,7 @@ public List resolveIndexAbstractions( } // we always need to check for date math expressions - final String dateMathName = indexNameExpressionResolver.resolveDateMathExpression(indexAbstraction); + final String dateMathName = IndexNameExpressionResolver.resolveDateMathExpression(indexAbstraction); if (dateMathName != indexAbstraction) { assert dateMathName.equals(indexAbstraction) == false; if (replaceWildcards && Regex.isSimpleMatchPattern(dateMathName)) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 7ca854d5a2a0f..6eee3b561499f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -61,10 +61,6 @@ public class IndexNameExpressionResolver { public static final String EXCLUDED_DATA_STREAMS_KEY = "es.excluded_ds"; public static final Version SYSTEM_INDEX_ENFORCEMENT_VERSION = Version.V_8_0_0; - private final DateMathExpressionResolver dateMathExpressionResolver = new DateMathExpressionResolver(); - private final WildcardExpressionResolver wildcardExpressionResolver = new WildcardExpressionResolver(); - private final List expressionResolvers = List.of(dateMathExpressionResolver, wildcardExpressionResolver); - private final ThreadContext threadContext; private final SystemIndices systemIndices; @@ -194,10 +190,7 @@ public List dataStreamNames(ClusterState state, IndicesOptions options, indexExpressions = new String[] { "*" }; } - List expressions = Arrays.asList(indexExpressions); - for (ExpressionResolver expressionResolver : expressionResolvers) { - expressions = expressionResolver.resolve(context, expressions); - } + final List expressions = resolveExpressions(Arrays.asList(indexExpressions), context); return ((expressions == null) ? 
List.of() : expressions).stream() .map(x -> state.metadata().getIndicesLookup().get(x)) .filter(Objects::nonNull) @@ -228,10 +221,7 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit getNetNewSystemIndexPredicate() ); - List expressions = List.of(request.index()); - for (ExpressionResolver expressionResolver : expressionResolvers) { - expressions = expressionResolver.resolve(context, expressions); - } + final List expressions = resolveExpressions(List.of(request.index()), context); if (expressions.size() == 1) { IndexAbstraction ia = state.metadata().getIndicesLookup().get(expressions.get(0)); @@ -259,6 +249,10 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit } } + private static List resolveExpressions(List expressions, Context context) { + return WildcardExpressionResolver.resolve(context, DateMathExpressionResolver.resolve(context, expressions)); + } + /** * Translates the provided index expression into actual concrete indices, properly deduplicated. * @@ -333,9 +327,7 @@ Index[] concreteIndices(Context context, String... indexExpressions) { indexExpressions = new String[] { Metadata.ALL }; } else { if (options.ignoreUnavailable() == false) { - List crossClusterIndices = Arrays.stream(indexExpressions) - .filter(index -> index.contains(":")) - .collect(Collectors.toList()); + List crossClusterIndices = Arrays.stream(indexExpressions).filter(index -> index.contains(":")).toList(); if (crossClusterIndices.size() > 0) { throw new IllegalArgumentException( "Cross-cluster calls are not supported in this context but remote indices " @@ -351,10 +343,7 @@ Index[] concreteIndices(Context context, String... indexExpressions) { final boolean failNoIndices = indexExpressions.length == 1 ? options.allowNoIndices() == false : options.ignoreUnavailable() == false; - List expressions = Arrays.asList(indexExpressions); - for (ExpressionResolver expressionResolver : expressionResolvers) { - expressions = expressionResolver.resolve(context, expressions); - } + final List expressions = resolveExpressions(Arrays.asList(indexExpressions), context); if (expressions.isEmpty()) { if (options.allowNoIndices() == false) { @@ -468,7 +457,7 @@ private void checkSystemIndexAccess(Context context, Set concreteIndices) .map(metadata::index) .filter(IndexMetadata::isSystem) .filter(idxMetadata -> systemIndexAccessPredicate.test(idxMetadata.getIndex().getName())) - .collect(Collectors.toList()); + .toList(); if (systemIndicesThatShouldNotBeAccessed.isEmpty()) { return; @@ -651,7 +640,7 @@ public boolean hasIndexAbstraction(String indexAbstraction, ClusterState state) /** * @return If the specified string is a date math expression then this method returns the resolved expression. */ - public String resolveDateMathExpression(String dateExpression) { + public static String resolveDateMathExpression(String dateExpression) { return DateMathExpressionResolver.resolveExpression(dateExpression); } @@ -659,7 +648,7 @@ public String resolveDateMathExpression(String dateExpression) { * @param time instant to consider when parsing the expression * @return If the specified string is a date math expression then this method returns the resolved expression.
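The new resolveExpressions helper pins down an ordering that the deleted expressionResolvers list only encoded implicitly: date math is resolved first and wildcards second, because a date math expression can itself produce a wildcard pattern. A toy composition under that assumption (the stage bodies are placeholders, not the real resolvers):

    import java.util.List;

    class ResolverPipelineSketch {
        // Placeholder: the real stage rewrites e.g. "<logs-{now/M}*>" to "logs-2022.01*".
        static List<String> resolveDateMath(List<String> expressions) {
            return expressions;
        }

        // Placeholder: the real stage expands "logs-2022.01*" against cluster metadata.
        static List<String> resolveWildcards(List<String> expressions) {
            return expressions;
        }

        static List<String> resolve(List<String> expressions) {
            // Wildcard expansion must run on the date math output, since a
            // resolved date math name may still contain a '*'.
            return resolveWildcards(resolveDateMath(expressions));
        }
    }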
*/ - public String resolveDateMathExpression(String dateExpression, long time) { + public static String resolveDateMathExpression(String dateExpression, long time) { return DateMathExpressionResolver.resolveExpression(dateExpression, () -> time); } @@ -677,11 +666,7 @@ public Set resolveExpressions(ClusterState state, String... expressions) getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - List resolvedExpressions = Arrays.asList(expressions); - for (ExpressionResolver expressionResolver : expressionResolvers) { - resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions); - } - return Set.copyOf(resolvedExpressions); + return Set.copyOf(resolveExpressions(Arrays.asList(expressions), context)); } /** @@ -792,7 +777,6 @@ public String[] indexAliases( * @return routing values grouped by concrete index */ public Map> resolveSearchRouting(ClusterState state, @Nullable String routing, String... expressions) { - List resolvedExpressions = expressions != null ? Arrays.asList(expressions) : Collections.emptyList(); Context context = new Context( state, IndicesOptions.lenientExpandOpen(), @@ -803,9 +787,10 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - for (ExpressionResolver expressionResolver : expressionResolvers) { - resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions); - } + final List resolvedExpressions = resolveExpressions( + expressions != null ? Arrays.asList(expressions) : Collections.emptyList(), + context + ); // TODO: it appears that this can never be true? if (isAllIndices(resolvedExpressions)) { @@ -832,11 +817,7 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab if (routings == null) { routings = new HashMap<>(); } - Set r = routings.get(concreteIndex); - if (r == null) { - r = new HashSet<>(); - routings.put(concreteIndex, r); - } + Set r = routings.computeIfAbsent(concreteIndex, k -> new HashSet<>()); r.addAll(aliasMetadata.searchRoutingValues()); if (paramRouting != null) { r.retainAll(paramRouting); @@ -915,7 +896,7 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab /** * Sets the same routing for all indices */ - public Map> resolveSearchRoutingAllIndices(Metadata metadata, String routing) { + public static Map> resolveSearchRoutingAllIndices(Metadata metadata, String routing) { if (routing != null) { Set r = Sets.newHashSet(Strings.splitStringByCommaToArray(routing)); Map> routings = new HashMap<>(); @@ -950,33 +931,6 @@ static boolean isExplicitAllPattern(Collection aliasesOrIndices) { return aliasesOrIndices != null && aliasesOrIndices.size() == 1 && Metadata.ALL.equals(aliasesOrIndices.iterator().next()); } - /** - * Identifies whether the first argument (an array containing index names) is a pattern that matches all indices - * - * @param indicesOrAliases the array containing index names - * @param concreteIndices array containing the concrete indices that the first argument refers to - * @return true if the first argument is a pattern that maps to all available indices, false otherwise - */ - boolean isPatternMatchingAllIndices(Metadata metadata, String[] indicesOrAliases, String[] concreteIndices) { - // if we end up matching on all indices, check, if its a wildcard parameter, or a "-something" structure - if (concreteIndices.length == metadata.getConcreteAllIndices().length && indicesOrAliases.length > 0) { - - // we might have something like /-test1,+test1 that would identify all indices - // 
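One small cleanup in the resolveSearchRouting hunk above deserves a note: the get-then-put-if-null idiom is replaced with Map.computeIfAbsent, which allocates the set only when the key is genuinely missing. The two forms are equivalent, for example:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class ComputeIfAbsentSketch {
        public static void main(String[] args) {
            Map<String, Set<String>> routings = new HashMap<>();

            // Before: explicit null check and put.
            Set<String> r = routings.get("index-1");
            if (r == null) {
                r = new HashSet<>();
                routings.put("index-1", r);
            }
            r.add("routing-a");

            // After: one call with the same effect.
            routings.computeIfAbsent("index-2", k -> new HashSet<>()).add("routing-b");

            // Prints both entries (HashMap iteration order is unspecified).
            System.out.println(routings);
        }
    }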
or something like /-test1 with test1 index missing and IndicesOptions.lenient() - if (indicesOrAliases[0].charAt(0) == '-') { - return true; - } - - // otherwise we check if there's any simple regex - for (String indexOrAlias : indicesOrAliases) { - if (Regex.isSimpleMatchPattern(indexOrAlias)) { - return true; - } - } - } - return false; - } - public SystemIndexAccessLevel getSystemIndexAccessLevel() { final SystemIndexAccessLevel accessLevel = systemIndices.getSystemIndexAccessLevel(threadContext); assert accessLevel != SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY @@ -1182,25 +1136,16 @@ public Predicate getSystemIndexAccessPredicate() { } } - private interface ExpressionResolver { - - /** - * Resolves the list of expressions into other expressions if possible (possible concrete indices and aliases, but - * that isn't required). The provided implementations can also be left untouched. - * - * @return a new list with expressions based on the provided expressions - */ - List resolve(Context context, List expressions); - - } - /** * Resolves alias/index name expressions with wildcards into the corresponding concrete indices/aliases */ - static final class WildcardExpressionResolver implements ExpressionResolver { + static final class WildcardExpressionResolver { - @Override - public List resolve(Context context, List expressions) { + private WildcardExpressionResolver() { + // Utility class + } + + public static List resolve(Context context, List expressions) { IndicesOptions options = context.getOptions(); Metadata metadata = context.getState().metadata(); // only check open/closed since if we do not expand to open or closed it doesn't make sense to @@ -1247,7 +1192,7 @@ public List resolve(Context context, List expressions) { return new ArrayList<>(result); } - private Set innerResolve(Context context, List expressions, IndicesOptions options, Metadata metadata) { + private static Set innerResolve(Context context, List expressions, IndicesOptions options, Metadata metadata) { Set result = null; boolean wildcardSeen = false; for (int i = 0; i < expressions.size(); i++) { @@ -1464,7 +1409,7 @@ private static boolean implicitHiddenMatch(String itemName, String expression) { return itemName.startsWith(".") && expression.startsWith(".") && Regex.isSimpleMatchPattern(expression); } - private boolean isEmptyOrTrivialWildcard(List expressions) { + private static boolean isEmptyOrTrivialWildcard(List expressions) { return expressions.isEmpty() || (expressions.size() == 1 && (Metadata.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0)))); } @@ -1498,7 +1443,7 @@ private static List resolveEmptyOrTrivialWildcardWithAllowedSystemIndice } } return true; - }).collect(Collectors.toUnmodifiableList()); + }).toList(); } private static String[] resolveEmptyOrTrivialWildcardToAllIndices(IndicesOptions options, Metadata metadata) { @@ -1520,7 +1465,7 @@ private static String[] resolveEmptyOrTrivialWildcardToAllIndices(IndicesOptions } } - public static final class DateMathExpressionResolver implements ExpressionResolver { + public static final class DateMathExpressionResolver { private static final DateFormatter DEFAULT_DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd"); private static final String EXPRESSION_LEFT_BOUND = "<"; @@ -1530,8 +1475,11 @@ public static final class DateMathExpressionResolver implements ExpressionResolv private static final char ESCAPE_CHAR = '\\'; private static final char TIME_ZONE_BOUND = '|'; - @Override - public List resolve(final Context 
context, List expressions) { + private DateMathExpressionResolver() { + // utility class + } + + public static List resolve(final Context context, List expressions) { List result = new ArrayList<>(expressions.size()); for (String expression : expressions) { result.add(resolveExpression(expression, context::getStartTime)); diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index f8c370c37673e..d5b7ced702ff3 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -863,11 +863,11 @@ private void rescheduleRefreshTasks() { } public Function dateMathExpressionResolverAt() { - return expression -> expressionResolver.resolveDateMathExpression(expression, System.currentTimeMillis()); + return expression -> IndexNameExpressionResolver.resolveDateMathExpression(expression, System.currentTimeMillis()); } public Function dateMathExpressionResolverAt(long instant) { - return expression -> expressionResolver.resolveDateMathExpression(expression, instant); + return expression -> IndexNameExpressionResolver.resolveDateMathExpression(expression, instant); } public interface ShardStoreDeleter { diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 413dff876c812..10a12a97b171b 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -88,8 +88,6 @@ public class IngestService implements ClusterStateApplier, ReportingService resolvedNames = DATE_MATH_EXPRESSION_RESOLVER.resolve( + List resolvedNames = IndexNameExpressionResolver.DateMathExpressionResolver.resolve( new IndexNameExpressionResolver.ResolverContext(epochMillis), List.of(unresolvedIndexName) ); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index c8b3273897683..77885fb624e4c 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -241,7 +241,7 @@ public void executeSnapshot(final CreateSnapshotRequest request, final ActionLis */ public void createSnapshot(final CreateSnapshotRequest request, final ActionListener listener) { final String repositoryName = request.repository(); - final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); + final String snapshotName = IndexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); validate(repositoryName, snapshotName); // TODO: create snapshot UUID in CreateSnapshotRequest and make this operation idempotent to cleanly deal with transport layer // retries @@ -455,7 +455,7 @@ public void cloneSnapshot(CloneSnapshotRequest request, ActionListener lis listener.onFailure(new RepositoryException(repositoryName, "cannot create snapshot in a readonly repository")); return; } - final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.target()); + final String snapshotName = IndexNameExpressionResolver.resolveDateMathExpression(request.target()); validate(repositoryName, snapshotName); // TODO: create snapshot UUID in CloneSnapshotRequest and make this operation idempotent to cleanly deal with transport layer // retries diff --git 
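With DateMathExpressionResolver reduced to a static utility, callers such as IngestService above pass the reference time in through a ResolverContext instead of holding resolver state. What resolution does is unchanged; as a rough approximation in plain java.time (the real resolver additionally handles rounding such as now/M, brace escaping, and '|'-separated time zones, none of which are reproduced here):

    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.format.DateTimeFormatter;

    class DateMathSketch {
        // Rough approximation of resolving "<logs-{now/d}>" at a fixed instant,
        // using the default uuuu.MM.dd pattern in UTC.
        static String resolve(String prefix, long nowMillis) {
            return prefix
                + DateTimeFormatter.ofPattern("uuuu.MM.dd")
                    .withZone(ZoneOffset.UTC)
                    .format(Instant.ofEpochMilli(nowMillis));
        }

        public static void main(String[] args) {
            // 1643068800000L is 2022-01-25T00:00:00Z.
            System.out.println(resolve("logs-", 1643068800000L)); // logs-2022.01.25
        }
    }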
a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index ec6f8e7598bfd..036a983320551 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -60,7 +60,6 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.nullValue; -import static org.mockito.AdditionalAnswers.returnsFirstArg; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.doThrow; @@ -68,7 +67,6 @@ import static org.mockito.Mockito.reset; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; public class MetadataRolloverServiceTests extends ESTestCase { @@ -282,29 +280,17 @@ public void testDataStreamValidation() throws IOException { public void testGenerateRolloverIndexName() { String invalidIndexName = randomAlphaOfLength(10) + "A"; IndexNameExpressionResolver indexNameExpressionResolver = TestIndexNameExpressionResolver.newInstance(); - expectThrows( - IllegalArgumentException.class, - () -> MetadataRolloverService.generateRolloverIndexName(invalidIndexName, indexNameExpressionResolver) - ); + expectThrows(IllegalArgumentException.class, () -> MetadataRolloverService.generateRolloverIndexName(invalidIndexName)); int num = randomIntBetween(0, 100); final String indexPrefix = randomAlphaOfLength(10); String indexEndingInNumbers = indexPrefix + "-" + num; assertThat( - MetadataRolloverService.generateRolloverIndexName(indexEndingInNumbers, indexNameExpressionResolver), + MetadataRolloverService.generateRolloverIndexName(indexEndingInNumbers), equalTo(indexPrefix + "-" + String.format(Locale.ROOT, "%06d", num + 1)) ); - assertThat( - MetadataRolloverService.generateRolloverIndexName("index-name-1", indexNameExpressionResolver), - equalTo("index-name-000002") - ); - assertThat( - MetadataRolloverService.generateRolloverIndexName("index-name-2", indexNameExpressionResolver), - equalTo("index-name-000003") - ); - assertEquals( - "", - MetadataRolloverService.generateRolloverIndexName("", indexNameExpressionResolver) - ); + assertThat(MetadataRolloverService.generateRolloverIndexName("index-name-1"), equalTo("index-name-000002")); + assertThat(MetadataRolloverService.generateRolloverIndexName("index-name-2"), equalTo("index-name-000003")); + assertEquals("", MetadataRolloverService.generateRolloverIndexName("")); } public void testCreateIndexRequest() { @@ -703,13 +689,10 @@ public void testValidation() throws Exception { MetadataCreateIndexService createIndexService = mock(MetadataCreateIndexService.class); MetadataIndexAliasesService metadataIndexAliasesService = mock(MetadataIndexAliasesService.class); - IndexNameExpressionResolver mockIndexNameExpressionResolver = mock(IndexNameExpressionResolver.class); - when(mockIndexNameExpressionResolver.resolveDateMathExpression(any())).then(returnsFirstArg()); MetadataRolloverService rolloverService = new MetadataRolloverService( null, createIndexService, metadataIndexAliasesService, - mockIndexNameExpressionResolver, EmptySystemIndices.INSTANCE ); diff --git 
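The MetadataRolloverServiceTests changes above show the knock-on effect in tests: pass-through Mockito stubs disappear because a static method cannot be stubbed that way and, for date math, does not need to be. A hypothetical test method illustrating the simplification:

    // Before (instance API), a mock existed only to echo the argument back:
    //   IndexNameExpressionResolver resolver = mock(IndexNameExpressionResolver.class);
    //   when(resolver.resolveDateMathExpression(any())).then(returnsFirstArg());
    //
    // After (static API), the real method is called; a name without date math
    // resolves to itself, so no stubbing is required:
    public void testPlainNameResolvesToItself() {
        assertEquals("plain-index-name", IndexNameExpressionResolver.resolveDateMathExpression("plain-index-name"));
    }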
a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 54c1d0e3f6780..0fc3b77660bb7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -227,7 +227,6 @@ public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPr final ThreadPool mockThreadPool = mock(ThreadPool.class); final MetadataCreateIndexService mockCreateIndexService = mock(MetadataCreateIndexService.class); final IndexNameExpressionResolver mockIndexNameExpressionResolver = mock(IndexNameExpressionResolver.class); - when(mockIndexNameExpressionResolver.resolveDateMathExpression(any())).thenReturn("logs-index-000003"); final ActionFilters mockActionFilters = mock(ActionFilters.class); final MetadataIndexAliasesService mdIndexAliasesService = mock(MetadataIndexAliasesService.class); @@ -271,7 +270,6 @@ public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPr mockThreadPool, mockCreateIndexService, mdIndexAliasesService, - mockIndexNameExpressionResolver, EmptySystemIndices.INSTANCE ); final TransportRolloverAction transportRolloverAction = new TransportRolloverAction( diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java index 5f9214494ac12..7151d8b2b0fa5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java @@ -33,7 +33,6 @@ public class DateMathExpressionResolverTests extends ESTestCase { - private final DateMathExpressionResolver expressionResolver = new DateMathExpressionResolver(); private final Context context = new Context( ClusterState.builder(new ClusterName("_name")).build(), IndicesOptions.strictExpand(), @@ -55,7 +54,7 @@ public void testNormal() throws Exception { for (int i = 0; i < numIndexExpressions; i++) { indexExpressions.add(randomAlphaOfLength(10)); } - List result = expressionResolver.resolve(context, indexExpressions); + List result = DateMathExpressionResolver.resolve(context, indexExpressions); assertThat(result.size(), equalTo(indexExpressions.size())); for (int i = 0; i < indexExpressions.size(); i++) { assertThat(result.get(i), equalTo(indexExpressions.get(i))); @@ -64,7 +63,7 @@ public void testNormal() throws Exception { public void testExpression() throws Exception { List indexExpressions = Arrays.asList("<.marvel-{now}>", "<.watch_history-{now}>", ""); - List result = expressionResolver.resolve(context, indexExpressions); + List result = DateMathExpressionResolver.resolve(context, indexExpressions); assertThat(result.size(), equalTo(3)); assertThat(result.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); assertThat(result.get(1), equalTo(".watch_history-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); @@ -72,18 +71,18 @@ public void testExpression() throws Exception { } public void testEmpty() throws Exception { - List result = expressionResolver.resolve(context, Collections.emptyList()); + List result = DateMathExpressionResolver.resolve(context, 
Collections.emptyList()); assertThat(result.size(), equalTo(0)); } public void testExpression_Static() throws Exception { - List result = expressionResolver.resolve(context, Arrays.asList("<.marvel-test>")); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-test>")); assertThat(result.size(), equalTo(1)); assertThat(result.get(0), equalTo(".marvel-test")); } public void testExpression_MultiParts() throws Exception { - List result = expressionResolver.resolve(context, Arrays.asList("<.text1-{now/d}-text2-{now/M}>")); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.text1-{now/d}-text2-{now/M}>")); assertThat(result.size(), equalTo(1)); assertThat( result.get(0), @@ -97,25 +96,25 @@ public void testExpression_MultiParts() throws Exception { } public void testExpression_CustomFormat() throws Exception { - List results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{yyyy.MM.dd}}>")); + List results = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{yyyy.MM.dd}}>")); assertThat(results.size(), equalTo(1)); assertThat(results.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); } public void testExpression_EscapeStatic() throws Exception { - List result = expressionResolver.resolve(context, Arrays.asList("<.mar\\{v\\}el-{now/d}>")); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.mar\\{v\\}el-{now/d}>")); assertThat(result.size(), equalTo(1)); assertThat(result.get(0), equalTo(".mar{v}el-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); } public void testExpression_EscapeDateFormat() throws Exception { - List result = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{'\\{year\\}'yyyy}}>")); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{'\\{year\\}'yyyy}}>")); assertThat(result.size(), equalTo(1)); assertThat(result.get(0), equalTo(".marvel-" + formatDate("'{year}'yyyy", dateFromMillis(context.getStartTime())))); } public void testExpression_MixedArray() throws Exception { - List result = expressionResolver.resolve( + List result = DateMathExpressionResolver.resolve( context, Arrays.asList("name1", "<.marvel-{now/d}>", "name2", "<.logstash-{now/M{uuuu.MM}}>") ); @@ -162,7 +161,10 @@ public void testExpression_CustomTimeZoneInIndexName() throws Exception { name -> false, name -> false ); - List results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{yyyy.MM.dd|" + timeZone.getId() + "}}>")); + List results = DateMathExpressionResolver.resolve( + context, + Arrays.asList("<.marvel-{now/d{yyyy.MM.dd|" + timeZone.getId() + "}}>") + ); assertThat(results.size(), equalTo(1)); logger.info("timezone: [{}], now [{}], name: [{}]", timeZone, now, results.get(0)); assertThat(results.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", now.withZoneSameInstant(timeZone)))); @@ -171,7 +173,7 @@ public void testExpression_CustomTimeZoneInIndexName() throws Exception { public void testExpressionInvalidUnescaped() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> expressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("invalid character at position [")); @@ -180,7 
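The custom time zone test here is the subtle one: the zone inside the braces shifts which calendar day "now" falls on, so a generated name can roll to the next day well before midnight UTC. In plain java.time, under an assumed fixed instant and zone:

    import java.time.Instant;
    import java.time.ZoneId;
    import java.time.format.DateTimeFormatter;

    class TimeZoneRollSketch {
        public static void main(String[] args) {
            // 13:00 UTC on 2022-01-25 is already 01:00 on 2022-01-26 at +12:00,
            // so "<.marvel-{now/d{yyyy.MM.dd|+12:00}}>" carries tomorrow's date.
            Instant now = Instant.parse("2022-01-25T13:00:00Z");
            String name = ".marvel-"
                + DateTimeFormatter.ofPattern("yyyy.MM.dd").withZone(ZoneId.of("+12:00")).format(now);
            System.out.println(name); // .marvel-2022.01.26
        }
    }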
+182,7 @@ public void testExpressionInvalidUnescaped() throws Exception { public void testExpressionInvalidDateMathFormat() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("date math placeholder is open ended")); @@ -189,7 +191,7 @@ public void testExpressionInvalidDateMathFormat() throws Exception { public void testExpressionInvalidEmptyDateMathFormat() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("missing date format")); @@ -198,7 +200,7 @@ public void testExpressionInvalidEmptyDateMathFormat() throws Exception { public void testExpressionInvalidOpenEnded() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("date math placeholder is open ended")); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index a84c4f68bc2cd..e6aefa60ded61 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -1415,65 +1415,6 @@ public void testIsExplicitAllIndicesWildcard() throws Exception { assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Arrays.asList("*")), equalTo(false)); } - public void testIsPatternMatchingAllIndicesExplicitList() throws Exception { - // even though it does identify all indices, it's not a pattern but just an explicit list of them - String[] concreteIndices = new String[] { "index1", "index2", "index3" }; - Metadata metadata = metadataBuilder(concreteIndices); - assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metadata, concreteIndices, concreteIndices), equalTo(false)); - } - - public void testIsPatternMatchingAllIndicesOnlyWildcard() throws Exception { - String[] indicesOrAliases = new String[] { "*" }; - String[] concreteIndices = new String[] { "index1", "index2", "index3" }; - Metadata metadata = metadataBuilder(concreteIndices); - assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metadata, indicesOrAliases, concreteIndices), equalTo(true)); - } - - public void testIsPatternMatchingAllIndicesMatchingTrailingWildcard() throws Exception { - String[] indicesOrAliases = new String[] { "index*" }; - String[] concreteIndices = new String[] { "index1", "index2", "index3" }; - Metadata metadata = metadataBuilder(concreteIndices); - assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metadata, indicesOrAliases, concreteIndices), equalTo(true)); - } - - public void 
testIsPatternMatchingAllIndicesNonMatchingTrailingWildcard() throws Exception { - String[] indicesOrAliases = new String[] { "index*" }; - String[] concreteIndices = new String[] { "index1", "index2", "index3" }; - String[] allConcreteIndices = new String[] { "index1", "index2", "index3", "a", "b" }; - Metadata metadata = metadataBuilder(allConcreteIndices); - assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metadata, indicesOrAliases, concreteIndices), equalTo(false)); - } - - public void testIsPatternMatchingAllIndicesMatchingSingleExclusion() throws Exception { - String[] indicesOrAliases = new String[] { "-index1", "index1" }; - String[] concreteIndices = new String[] { "index1", "index2", "index3" }; - Metadata metadata = metadataBuilder(concreteIndices); - assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metadata, indicesOrAliases, concreteIndices), equalTo(true)); - } - - public void testIsPatternMatchingAllIndicesNonMatchingSingleExclusion() throws Exception { - String[] indicesOrAliases = new String[] { "-index1" }; - String[] concreteIndices = new String[] { "index2", "index3" }; - String[] allConcreteIndices = new String[] { "index1", "index2", "index3" }; - Metadata metadata = metadataBuilder(allConcreteIndices); - assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metadata, indicesOrAliases, concreteIndices), equalTo(false)); - } - - public void testIsPatternMatchingAllIndicesMatchingTrailingWildcardAndExclusion() throws Exception { - String[] indicesOrAliases = new String[] { "index*", "-index1", "index1" }; - String[] concreteIndices = new String[] { "index1", "index2", "index3" }; - Metadata metadata = metadataBuilder(concreteIndices); - assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metadata, indicesOrAliases, concreteIndices), equalTo(true)); - } - - public void testIsPatternMatchingAllIndicesNonMatchingTrailingWildcardAndExclusion() throws Exception { - String[] indicesOrAliases = new String[] { "index*", "-index1" }; - String[] concreteIndices = new String[] { "index2", "index3" }; - String[] allConcreteIndices = new String[] { "index1", "index2", "index3" }; - Metadata metadata = metadataBuilder(allConcreteIndices); - assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metadata, indicesOrAliases, concreteIndices), equalTo(false)); - } - public void testIndexOptionsFailClosedIndicesAndAliases() { Metadata.Builder mdBuilder = Metadata.builder() .put( @@ -2912,7 +2853,7 @@ public void testDataStreamsNames() { public void testMathExpressionSupport() { Instant instant = LocalDate.of(2021, 01, 11).atStartOfDay().toInstant(ZoneOffset.UTC); - String resolved = this.indexNameExpressionResolver.resolveDateMathExpression("", instant.toEpochMilli()); + String resolved = IndexNameExpressionResolver.resolveDateMathExpression("", instant.toEpochMilli()); assertEquals(resolved, "a-name-2021-01"); } @@ -2921,7 +2862,7 @@ public void testMathExpressionSupportWithOlderDate() { Instant instant = LocalDate.of(2020, 12, 2).atStartOfDay().toInstant(ZoneOffset.UTC); final String indexName = ""; - String resolved = this.indexNameExpressionResolver.resolveDateMathExpression(indexName, instant.toEpochMilli()); + String resolved = IndexNameExpressionResolver.resolveDateMathExpression(indexName, instant.toEpochMilli()); assertEquals(resolved, "older-date-2020-12"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java 
b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 0d8030f429262..939fda916d40b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; @@ -109,11 +108,9 @@ public class MetadataCreateIndexServiceTests extends ESTestCase { private CreateIndexClusterStateUpdateRequest request; private SearchExecutionContext searchExecutionContext; - private IndexNameExpressionResolver indexNameExpressionResolver; @Before public void setupCreateIndexRequestAndAliasValidator() { - indexNameExpressionResolver = new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY), EmptySystemIndices.INSTANCE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); Settings indexSettings = Settings.builder() .put(SETTING_VERSION_CREATED, Version.CURRENT) @@ -741,7 +738,7 @@ public void testInvalidAliasName() { Metadata.builder().build(), xContentRegistry(), searchExecutionContext, - indexNameExpressionResolver::resolveDateMathExpression, + IndexNameExpressionResolver::resolveDateMathExpression, m -> false ) ); @@ -759,7 +756,7 @@ public void testAliasNameWithMathExpression() { Metadata.builder().build(), xContentRegistry(), searchExecutionContext, - indexNameExpressionResolver::resolveDateMathExpression, + IndexNameExpressionResolver::resolveDateMathExpression, m -> false ); @@ -793,7 +790,7 @@ public void testRequestDataHavePriorityOverTemplateData() throws Exception { Metadata.builder().build(), xContentRegistry(), searchExecutionContext, - indexNameExpressionResolver::resolveDateMathExpression, + IndexNameExpressionResolver::resolveDateMathExpression, m -> false ); @@ -889,7 +886,7 @@ public void testTemplateOrder() throws Exception { Metadata.builder().build(), xContentRegistry(), searchExecutionContext, - indexNameExpressionResolver::resolveDateMathExpression, + IndexNameExpressionResolver::resolveDateMathExpression, m -> false ); @@ -930,7 +927,7 @@ public void testResolvedAliasInTemplate() { Metadata.builder().build(), xContentRegistry(), searchExecutionContext, - indexNameExpressionResolver::resolveDateMathExpression, + IndexNameExpressionResolver::resolveDateMathExpression, m -> false ); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index ef1fea76bb295..4676703ecdaac 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -40,37 +40,60 @@ public void testConvertWildcardsJustIndicesTests() { .put(indexBuilder("testYYY")) .put(indexBuilder("kuku")); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new 
IndexNameExpressionResolver.Context( state, IndicesOptions.lenientExpandOpen(), SystemIndexAccessLevel.NONE ); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testXXX"))), equalTo(newHashSet("testXXX"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), equalTo(newHashSet("testXXX", "testYYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "ku*"))), equalTo(newHashSet("testXXX", "kuku"))); assertThat( - newHashSet(resolver.resolve(context, Collections.singletonList("test*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testXXX"))), + equalTo(newHashSet("testXXX")) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), + equalTo(newHashSet("testXXX", "testYYY")) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "ku*"))), + equalTo(newHashSet("testXXX", "kuku")) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), equalTo(newHashSet("testXXX", "testXYY"))); assertThat( - newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXXX", "testXYY")) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku")) ); assertThat( - newHashSet(resolver.resolve(context, Collections.singletonList("*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")) ); assertThat( - newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), equalTo(newHashSet("testXXX", "testYYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))), equalTo(newHashSet("testXXX", "-testXXX"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "testY*"))), equalTo(newHashSet("testXXX", "testYYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))), equalTo(newHashSet("testXXX"))); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), + equalTo(newHashSet("testXXX", "testYYY")) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))), + equalTo(newHashSet("testXXX", "-testXXX")) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "testY*"))), + equalTo(newHashSet("testXXX", "testYYY")) + ); + assertThat( + 
newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "-testX*"))), + equalTo(newHashSet("testXXX")) + ); } public void testConvertWildcardsTests() { @@ -80,7 +103,6 @@ public void testConvertWildcardsTests() { .put(indexBuilder("testYYY").putAlias(AliasMetadata.builder("alias3"))) .put(indexBuilder("kuku")); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context( state, @@ -88,17 +110,23 @@ public void testConvertWildcardsTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(resolver.resolve(context, Arrays.asList("testYY*", "alias*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testYY*", "alias*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("-kuku"))), equalTo(newHashSet("-kuku"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("test*", "-testYYY"))), equalTo(newHashSet("testXXX", "testXYY"))); assertThat( - newHashSet(resolver.resolve(context, Arrays.asList("testX*", "testYYY"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("-kuku"))), + equalTo(newHashSet("-kuku")) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("test*", "-testYYY"))), + equalTo(newHashSet("testXXX", "testXYY")) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testX*", "testYYY"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); assertThat( - newHashSet(resolver.resolve(context, Arrays.asList("testYYY", "testX*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testYYY", "testX*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); } @@ -112,7 +140,6 @@ public void testConvertWildcardsOpenClosedIndicesTests() { .put(indexBuilder("testYYX").state(IndexMetadata.State.CLOSE)) .put(indexBuilder("kuku").state(IndexMetadata.State.OPEN)); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context( state, @@ -120,7 +147,7 @@ public void testConvertWildcardsOpenClosedIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), equalTo(newHashSet("testXXX", "testXXY", "testXYY")) ); context = new IndexNameExpressionResolver.Context( @@ -128,13 +155,19 @@ public void testConvertWildcardsOpenClosedIndicesTests() { IndicesOptions.fromOptions(true, true, false, true), SystemIndexAccessLevel.NONE ); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), equalTo(newHashSet("testXYY"))); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, 
Collections.singletonList("testX*"))), + equalTo(newHashSet("testXYY")) + ); context = new IndexNameExpressionResolver.Context( state, IndicesOptions.fromOptions(true, true, true, false), SystemIndexAccessLevel.NONE ); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), equalTo(newHashSet("testXXX", "testXXY"))); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXXX", "testXXY")) + ); } // issue #13334 @@ -148,7 +181,6 @@ public void testMultipleWildcards() { .put(indexBuilder("kukuYYY")); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context( state, @@ -156,17 +188,30 @@ public void testMultipleWildcards() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(resolver.resolve(context, Collections.singletonList("test*X*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*X*"))), equalTo(newHashSet("testXXX", "testXXY", "testXYY")) ); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*X*Y"))), equalTo(newHashSet("testXXY", "testXYY"))); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("kuku*Y*"))), equalTo(newHashSet("kukuYYY"))); assertThat( - newHashSet(resolver.resolve(context, Collections.singletonList("*Y*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*X*Y"))), + equalTo(newHashSet("testXXY", "testXYY")) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("kuku*Y*"))), + equalTo(newHashSet("kukuYYY")) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*Y*"))), equalTo(newHashSet("testXXY", "testXYY", "testYYY", "kukuYYY")) ); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*Y*X"))).size(), equalTo(0)); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*Y*X"))).size(), equalTo(0)); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*Y*X"))) + .size(), + equalTo(0) + ); + assertThat( + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*Y*X"))).size(), + equalTo(0) + ); } public void testAll() { @@ -175,7 +220,6 @@ public void testAll() { .put(indexBuilder("testXYY")) .put(indexBuilder("testYYY")); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context( state, @@ -183,7 +227,7 @@ public void testAll() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(resolver.resolve(context, Collections.singletonList("_all"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("_all"))), equalTo(newHashSet("testXXX", "testXYY", 
"testYYY")) ); } @@ -195,7 +239,6 @@ public void testResolveAliases() { .put(indexBuilder("foo_index").state(State.OPEN).putAlias(AliasMetadata.builder("foo_alias"))) .put(indexBuilder("bar_index").state(State.OPEN).putAlias(AliasMetadata.builder("foo_alias"))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); // when ignoreAliases option is not set, WildcardExpressionResolver resolves the provided // expressions against the defined indices and aliases IndicesOptions indicesAndAliasesOptions = IndicesOptions.fromOptions( @@ -229,44 +272,71 @@ public void testResolveAliases() { ); { - List indices = resolver.resolve(indicesAndAliasesContext, Collections.singletonList("foo_a*")); + List indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + indicesAndAliasesContext, + Collections.singletonList("foo_a*") + ); assertThat(indices, containsInAnyOrder("foo_index", "bar_index")); } { - List indices = resolver.resolve(skipAliasesLenientContext, Collections.singletonList("foo_a*")); + List indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + skipAliasesLenientContext, + Collections.singletonList("foo_a*") + ); assertEquals(0, indices.size()); } { IndexNotFoundException infe = expectThrows( IndexNotFoundException.class, - () -> resolver.resolve(skipAliasesStrictContext, Collections.singletonList("foo_a*")) + () -> IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + skipAliasesStrictContext, + Collections.singletonList("foo_a*") + ) ); assertEquals("foo_a*", infe.getIndex().getName()); } { - List indices = resolver.resolve(indicesAndAliasesContext, Collections.singletonList("foo*")); + List indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + indicesAndAliasesContext, + Collections.singletonList("foo*") + ); assertThat(indices, containsInAnyOrder("foo_foo", "foo_index", "bar_index")); } { - List indices = resolver.resolve(skipAliasesLenientContext, Collections.singletonList("foo*")); + List indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + skipAliasesLenientContext, + Collections.singletonList("foo*") + ); assertThat(indices, containsInAnyOrder("foo_foo", "foo_index")); } { - List indices = resolver.resolve(skipAliasesStrictContext, Collections.singletonList("foo*")); + List indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + skipAliasesStrictContext, + Collections.singletonList("foo*") + ); assertThat(indices, containsInAnyOrder("foo_foo", "foo_index")); } { - List indices = resolver.resolve(indicesAndAliasesContext, Collections.singletonList("foo_alias")); + List indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + indicesAndAliasesContext, + Collections.singletonList("foo_alias") + ); assertThat(indices, containsInAnyOrder("foo_alias")); } { - List indices = resolver.resolve(skipAliasesLenientContext, Collections.singletonList("foo_alias")); + List indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + skipAliasesLenientContext, + Collections.singletonList("foo_alias") + ); assertThat(indices, containsInAnyOrder("foo_alias")); } { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> resolver.resolve(skipAliasesStrictContext, Collections.singletonList("foo_alias")) + () -> 
IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + skipAliasesStrictContext, + Collections.singletonList("foo_alias") + ) ); assertEquals( "The provided expression [foo_alias] matches an alias, " + "specify the corresponding concrete indices instead.", @@ -298,8 +368,6 @@ public void testResolveDataStreams() { ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); - { IndicesOptions indicesAndAliasesOptions = IndicesOptions.fromOptions( randomBoolean(), @@ -318,11 +386,17 @@ public void testResolveDataStreams() { ); // data streams are not included but expression matches the data stream - List indices = resolver.resolve(indicesAndAliasesContext, Collections.singletonList("foo_*")); + List indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + indicesAndAliasesContext, + Collections.singletonList("foo_*") + ); assertThat(indices, containsInAnyOrder("foo_index", "foo_foo", "bar_index")); // data streams are not included and expression doesn't match the data stream - indices = resolver.resolve(indicesAndAliasesContext, Collections.singletonList("bar_*")); + indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + indicesAndAliasesContext, + Collections.singletonList("bar_*") + ); assertThat(indices, containsInAnyOrder("bar_bar", "bar_index")); } @@ -349,7 +423,10 @@ public void testResolveDataStreams() { ); // data stream's corresponding backing indices are resolved - List indices = resolver.resolve(indicesAliasesAndDataStreamsContext, Collections.singletonList("foo_*")); + List indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + indicesAliasesAndDataStreamsContext, + Collections.singletonList("foo_*") + ); assertThat( indices, containsInAnyOrder( @@ -362,7 +439,10 @@ public void testResolveDataStreams() { ); // include all wildcard adds the data stream's backing indices - indices = resolver.resolve(indicesAliasesAndDataStreamsContext, Collections.singletonList("*")); + indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + indicesAliasesAndDataStreamsContext, + Collections.singletonList("*") + ); assertThat( indices, containsInAnyOrder( @@ -400,7 +480,10 @@ public void testResolveDataStreams() { ); // data stream's corresponding backing indices are resolved - List indices = resolver.resolve(indicesAliasesDataStreamsAndHiddenIndices, Collections.singletonList("foo_*")); + List indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + indicesAliasesDataStreamsAndHiddenIndices, + Collections.singletonList("foo_*") + ); assertThat( indices, containsInAnyOrder( @@ -413,7 +496,10 @@ public void testResolveDataStreams() { ); // include all wildcard adds the data stream's backing indices - indices = resolver.resolve(indicesAliasesDataStreamsAndHiddenIndices, Collections.singletonList("*")); + indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + indicesAliasesDataStreamsAndHiddenIndices, + Collections.singletonList("*") + ); assertThat( indices, containsInAnyOrder( diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index e14e7ed94f83f..37e2cf71d3362 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -67,7 +67,6 @@ import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.ESTestCase.randomMap; -import static org.mockito.AdditionalAnswers.returnsFirstArg; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -424,8 +423,6 @@ public static MetadataRolloverService getMetadataRolloverService( mappingLookup = MappingLookup.fromMappers(mapping, List.of(dtfm, dateFieldMapper), List.of(), List.of()); } IndicesService indicesService = mockIndicesServices(mappingLookup); - IndexNameExpressionResolver mockIndexNameExpressionResolver = mock(IndexNameExpressionResolver.class); - when(mockIndexNameExpressionResolver.resolveDateMathExpression(any())).then(returnsFirstArg()); ShardLimitValidator shardLimitValidator = new ShardLimitValidator(Settings.EMPTY, clusterService); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( @@ -443,13 +440,7 @@ public static MetadataRolloverService getMetadataRolloverService( new IndexSettingProviders(providers) ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService(clusterService, indicesService, null, registry); - return new MetadataRolloverService( - testThreadPool, - createIndexService, - indexAliasesService, - mockIndexNameExpressionResolver, - EmptySystemIndices.INSTANCE - ); + return new MetadataRolloverService(testThreadPool, createIndexService, indexAliasesService, EmptySystemIndices.INSTANCE); } private static MetadataFieldMapper getDataStreamTimestampFieldMapper() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java index dc0932a805467..5d860163ddd40 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java @@ -39,9 +39,6 @@ public class GenerateSnapshotNameStep extends ClusterStateActionStep { private static final Logger logger = LogManager.getLogger(GenerateSnapshotNameStep.class); - private static final IndexNameExpressionResolver.DateMathExpressionResolver DATE_MATH_RESOLVER = - new IndexNameExpressionResolver.DateMathExpressionResolver(); - private final String snapshotRepository; public GenerateSnapshotNameStep(StepKey key, StepKey nextStepKey, String snapshotRepository) { @@ -143,7 +140,7 @@ public static String generateSnapshotName(String name) { } public static String generateSnapshotName(String name, IndexNameExpressionResolver.Context context) { - List candidates = DATE_MATH_RESOLVER.resolve(context, Collections.singletonList(name)); + List candidates = IndexNameExpressionResolver.DateMathExpressionResolver.resolve(context, Collections.singletonList(name)); if (candidates.size() != 1) { throw new IllegalStateException("resolving snapshot name " + name + " generated more than one candidate: " + candidates); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java index 602cd785112b6..0612f2302404f 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java @@ -17,9 +17,9 @@ import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.Requests; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSourceField; @@ -59,7 +59,7 @@ protected String configRoles() { public void testDateMathExpressionsCanBeAuthorized() throws Exception { final String expression = ""; - final String expectedIndexName = TestIndexNameExpressionResolver.newInstance().resolveDateMathExpression(expression); + final String expectedIndexName = IndexNameExpressionResolver.resolveDateMathExpression(expression); final boolean refeshOnOperation = randomBoolean(); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", basicAuthHeaderValue("user1", USERS_PASSWD))); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index 7a8ef774dfea3..eb95f66bb3064 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -177,7 +177,7 @@ ResolvedIndices resolveIndicesAndAliasesWithoutWildcards(String action, IndicesR // shard level requests. 
final List localIndices = new ArrayList<>(indices.length); for (String name : indices) { - localIndices.add(nameExpressionResolver.resolveDateMathExpression(name)); + localIndices.add(IndexNameExpressionResolver.resolveDateMathExpression(name)); } return new ResolvedIndices(localIndices, List.of()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index 522be0172d37e..67e240d3625f6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -122,7 +122,6 @@ public class IndicesAndAliasesResolverTests extends ESTestCase { private CompositeRolesStore rolesStore; private Metadata metadata; private IndicesAndAliasesResolver defaultIndicesResolver; - private IndexNameExpressionResolver indexNameExpressionResolver; private Map roleMap; private String todaySuffix; private String tomorrowSuffix; @@ -137,7 +136,7 @@ public void setup() { .put("cluster.remote.other_remote.seeds", "127.0.0.1:" + randomIntBetween(9351, 9399)) .build(); - indexNameExpressionResolver = TestIndexNameExpressionResolver.newInstance(); + IndexNameExpressionResolver indexNameExpressionResolver = TestIndexNameExpressionResolver.newInstance(); DateFormatter dateFormatter = DateFormatter.forPattern("uuuu.MM.dd"); Instant now = Instant.now(Clock.systemUTC()); @@ -168,7 +167,7 @@ public void setup() { .put(indexBuilder("bar").settings(settings)) .put(indexBuilder("bar-closed").state(State.CLOSE).settings(settings)) .put(indexBuilder("bar2").settings(settings)) - .put(indexBuilder(indexNameExpressionResolver.resolveDateMathExpression("")).settings(settings)) + .put(indexBuilder(IndexNameExpressionResolver.resolveDateMathExpression("")).settings(settings)) .put(indexBuilder("-index10").settings(settings)) .put(indexBuilder("-index11").settings(settings)) .put(indexBuilder("-index20").settings(settings)) @@ -1618,7 +1617,7 @@ public void testUnauthorizedDateMathExpressionStrict() { public void testResolveDateMathExpression() { // make the user authorized final String pattern = randomBoolean() ? "" : ""; - String dateTimeIndex = indexNameExpressionResolver.resolveDateMathExpression(""); + String dateTimeIndex = IndexNameExpressionResolver.resolveDateMathExpression(""); String[] authorizedIndices = new String[] { "bar", "bar-closed", "foofoobar", "foofoo", "missing", "foofoo-closed", dateTimeIndex }; roleMap.put( "role", @@ -1677,7 +1676,7 @@ public void testAliasDateMathExpressionNotSupported() { "foofoo", "missing", "foofoo-closed", - indexNameExpressionResolver.resolveDateMathExpression("") }; + IndexNameExpressionResolver.resolveDateMathExpression("") }; roleMap.put( "role", new RoleDescriptor( From d8044bb3b3fb5d910a2f3c91eb712b7e8cc3d5c7 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 25 Jan 2022 11:53:13 +0100 Subject: [PATCH 011/100] Fix testMasterFailoverDuringStaleIndicesCleanup (#83018) We could run into an edge case where the delete randomly gets queued after the snapshot, so we would not trigger a stale index cleanup as expected, would never try to delete any data files, and would thus fail while waiting for a block on data files.
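To make the ordering argument concrete, here is a minimal, self-contained Java sketch of the guarantee the fix relies on. This is illustrative only, not Elasticsearch code: the queue stands in for the master's pending cluster-state updates, the task names mirror the test's operations, and awaitVisible plays the role the test's awaitNDeletionsInProgress(1) call plays in the hunk below. Polling until the delete is visible before submitting the snapshot guarantees the snapshot sits behind it in the queue.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class OrderingSketch {
    static final Queue<String> clusterStateQueue = new ConcurrentLinkedQueue<>();

    // stand-in for awaitNDeletionsInProgress(1): poll until the delete task is visible
    static void awaitVisible(String task) throws InterruptedException {
        while (clusterStateQueue.contains(task) == false) {
            Thread.sleep(10);
        }
    }

    public static void main(String[] args) throws Exception {
        // the delete is submitted asynchronously, like the concurrently running delete in the test
        new Thread(() -> clusterStateQueue.add("delete-snapshot")).start();

        // the fix: only start the snapshot once the delete is known to be queued
        awaitVisible("delete-snapshot");
        clusterStateQueue.add("new-full-snapshot");

        // the snapshot is now guaranteed to be ordered after the delete
        System.out.println(clusterStateQueue); // prints [delete-snapshot, new-full-snapshot]
    }
}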
Closes #83014 --- .../org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index 046c9e5140954..f69cc0daa0ac8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -1083,9 +1083,10 @@ public void testMasterFailoverDuringStaleIndicesCleanup() throws Exception { masterName ); + // wait for the delete to show up in the CS so that the below snapshot is queued after it for sure + awaitNDeletionsInProgress(1); final ActionFuture snapshotFuture = startFullSnapshotFromDataNode(repoName, "new-full-snapshot"); waitForBlock(masterName, repoName); - awaitNDeletionsInProgress(1); awaitNumberOfSnapshotsInProgress(1); networkDisruption.startDisrupting(); ensureStableCluster(3, dataNode); From 533f6e0eb8fe84c4fcb3b8de58ca125af8f0190d Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 25 Jan 2022 11:56:51 +0100 Subject: [PATCH 012/100] Emit trace.id into audit logs (#82849) Since #74210, ES has been emitting trace.id into its logs, but it was not included in audit logs. This commit adds trace.id to audit logging. --- docs/changelog/82849.yaml | 5 ++ .../en/security/auditing/event-types.asciidoc | 15 ++-- .../core/src/main/config/log4j2.properties | 2 + .../audit/logfile/LoggingAuditTrail.java | 82 +++++++------------ .../audit/logfile/LoggingAuditTrailTests.java | 61 ++++++++++++-- 5 files changed, 100 insertions(+), 65 deletions(-) create mode 100644 docs/changelog/82849.yaml diff --git a/docs/changelog/82849.yaml b/docs/changelog/82849.yaml new file mode 100644 index 0000000000000..4b3bdcc16fac2 --- /dev/null +++ b/docs/changelog/82849.yaml @@ -0,0 +1,5 @@ +pr: 82849 +summary: Emit `trace.id` into audit logs +area: "Audit" +type: enhancement +issues: [] diff --git a/x-pack/docs/en/security/auditing/event-types.asciidoc b/x-pack/docs/en/security/auditing/event-types.asciidoc index 9601ac0090262..2f92ee66c0c40 100644 --- a/x-pack/docs/en/security/auditing/event-types.asciidoc +++ b/x-pack/docs/en/security/auditing/event-types.asciidoc @@ -6,7 +6,7 @@ When you are <>, a single client request might generate multiple audit events, across multiple cluster nodes. The common `request.id` attribute can be used to correlate the associated events. -Use the <> +Use the <> setting in `elasticsearch.yml` to specify the kind of events you want to include in the auditing output. @@ -534,8 +534,8 @@ The following list shows attributes that are common to all audit event types: In addition, if `event.type` equals <>, the `event.action` attribute takes one of the following values: `put_user`, `change_password`, `put_role`, `put_role_mapping`, -`change_enable_user`, `change_disable_user`, `put_privileges`, `create_apikey`, -`delete_user`, `delete_role`, `delete_role_mapping`, `invalidate_apikeys` or +`change_enable_user`, `change_disable_user`, `put_privileges`, `create_apikey`, +`delete_user`, `delete_role`, `delete_role_mapping`, `invalidate_apikeys` or +`delete_privileges`.
`request.id` :: A synthetic identifier that can be used to correlate the events @@ -557,9 +557,12 @@ show more details about the requesting client: `transport` (request was received on the transport channel), or `local_node` (the local node issued the request). `opaque_id` :: The value of the `X-Opaque-Id` HTTP header (if present) of - the request associated with this event. This header can - be used freely by the client to mark API calls, as it has - no semantics in Elasticsearch. + the request associated with this event. + See more: <> +`trace_id` :: The identifier extracted from the `traceparent` HTTP header + (if present) of the request associated with this event. + It allows audit logs to be surfaced in the Trace Logs feature + of Elastic APM. `x_forwarded_for` :: The verbatim value of the `X-Forwarded-For` HTTP request header (if present) of the request associated with the audit event. This header is commonly added by proxies diff --git a/x-pack/plugin/core/src/main/config/log4j2.properties b/x-pack/plugin/core/src/main/config/log4j2.properties index 3df7f338aff0b..6c1d04596246e 100644 --- a/x-pack/plugin/core/src/main/config/log4j2.properties +++ b/x-pack/plugin/core/src/main/config/log4j2.properties @@ -37,6 +37,7 @@ appender.audit_rolling.layout.pattern = {\ %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ %varsNotEmpty{, "indices":%map{indices}}\ %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ + %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\ %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ @@ -76,6 +77,7 @@ appender.audit_rolling.layout.pattern = {\ # "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) # "indices" the array of indices that the "action" is acting upon # "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header +# "trace_id" an identifier conveyed by part of the "traceparent" request header # "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) # "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event # "rule" name of the applied rule if the "origin.type" is "ip_filter" diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index 1c453bf665d1a..0d9d820f4bfa7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -180,6 +180,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { public static final String TRANSPORT_PROFILE_FIELD_NAME = "transport.profile"; public static final String RULE_FIELD_NAME = "rule"; public static final String OPAQUE_ID_FIELD_NAME = "opaque_id"; + public static final String TRACE_ID_FIELD_NAME = "trace.id"; public static final String X_FORWARDED_FOR_FIELD_NAME = "x_forwarded_for"; // the fields below are used exclusively for "security_config_change" type of events, and show the configuration // object taking effect; it could
be creating a new, or updating an existing configuration @@ -457,8 +458,7 @@ public void authenticationSuccess(String requestId, Authentication authenticatio .withAuthentication(authentication) .withRestOrigin(request) .withRequestBody(request) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -486,8 +486,7 @@ public void authenticationSuccess(String requestId, Authentication authenticatio .withAuthentication(authentication) .withRestOrTransportOrigin(transportRequest, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -506,8 +505,7 @@ public void anonymousAccessDenied(String requestId, String action, TransportRequ .withRequestId(requestId) .withRestOrTransportOrigin(transportRequest, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -523,8 +521,7 @@ public void anonymousAccessDenied(String requestId, RestRequest request) { .withRestOrigin(request) .withRequestBody(request) .withRequestId(requestId) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -543,8 +540,7 @@ public void authenticationFailed(String requestId, AuthenticationToken token, St .withRequestId(requestId) .withRestOrTransportOrigin(transportRequest, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext); + .withThreadContext(threadContext); if (token instanceof ServiceAccountToken) { logEntryBuilder.with(SERVICE_TOKEN_NAME_FIELD_NAME, ((ServiceAccountToken) token).getTokenName()); } @@ -562,8 +558,7 @@ public void authenticationFailed(String requestId, RestRequest request) { .withRestOrigin(request) .withRequestBody(request) .withRequestId(requestId) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -581,8 +576,7 @@ public void authenticationFailed(String requestId, String action, TransportReque .withRequestId(requestId) .withRestOrTransportOrigin(transportRequest, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -600,8 +594,7 @@ public void authenticationFailed(String requestId, AuthenticationToken token, Re .withRestOrigin(request) .withRequestBody(request) .withRequestId(requestId) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext); + .withThreadContext(threadContext); if (token instanceof ServiceAccountToken) { logEntryBuilder.with(SERVICE_TOKEN_NAME_FIELD_NAME, ((ServiceAccountToken) token).getTokenName()); } @@ -630,8 +623,7 @@ public void authenticationFailed( .withRequestId(requestId) .withRestOrTransportOrigin(transportRequest, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -650,8 +642,7 @@ public void authenticationFailed(String requestId, String realm, AuthenticationT .withRestOrigin(request) .withRequestBody(request) .withRequestId(requestId) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -687,8 +678,7 @@ public void 
accessGranted( .withAuthentication(authentication) .withRestOrTransportOrigin(msg, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .with(authorizationInfo.asMap()) .build(); } @@ -795,8 +785,7 @@ public void explicitIndexAccessEvent( .withRequestId(requestId) .withAuthentication(authentication) .with(INDICES_FIELD_NAME, indices) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .with(authorizationInfo.asMap()); final InetSocketAddress restAddress = RemoteHostHeader.restRemoteAddress(threadContext); if (restAddress != null) { @@ -841,8 +830,7 @@ public void accessDenied( .withRestOrTransportOrigin(transportRequest, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) .with(authorizationInfo.asMap()) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -857,8 +845,7 @@ public void tamperedRequest(String requestId, RestRequest request) { .withRestOrigin(request) .withRequestBody(request) .withRequestId(requestId) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -876,8 +863,7 @@ public void tamperedRequest(String requestId, String action, TransportRequest tr .withRequestId(requestId) .withRestOrTransportOrigin(transportRequest, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -906,8 +892,7 @@ public void tamperedRequest(String requestId, Authentication authentication, Str .withRestOrTransportOrigin(transportRequest, threadContext) .withAuthentication(authentication) .with(INDICES_FIELD_NAME, indices.orElse(null)) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -925,8 +910,7 @@ public void connectionGranted(InetAddress inetAddress, String profile, SecurityI .with(ORIGIN_ADDRESS_FIELD_NAME, NetworkAddress.format(inetAddress)) .with(TRANSPORT_PROFILE_FIELD_NAME, profile) .with(RULE_FIELD_NAME, rule.toString()) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -943,8 +927,7 @@ public void connectionDenied(InetAddress inetAddress, String profile, SecurityIp .with(ORIGIN_ADDRESS_FIELD_NAME, NetworkAddress.format(inetAddress)) .with(TRANSPORT_PROFILE_FIELD_NAME, profile) .with(RULE_FIELD_NAME, rule.toString()) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -979,8 +962,7 @@ public void runAsGranted( .withRestOrTransportOrigin(transportRequest, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) .with(authorizationInfo.asMap()) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -1016,8 +998,7 @@ public void runAsDenied( .withRestOrTransportOrigin(transportRequest, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) .with(authorizationInfo.asMap()) - .withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -1045,8 +1026,7 @@ public void runAsDenied(String requestId, Authentication authentication, RestReq .withRestOrigin(request) .withRequestBody(request) .withRequestId(requestId) - 
.withOpaqueId(threadContext) - .withXForwardedFor(threadContext) + .withThreadContext(threadContext) .build(); } } @@ -1473,20 +1453,18 @@ LogEntryBuilder withRequestId(String requestId) { return this; } - LogEntryBuilder withOpaqueId(ThreadContext threadContext) { - final String opaqueId = threadContext.getHeader(Task.X_OPAQUE_ID_HTTP_HEADER); - if (opaqueId != null) { - logEntry.with(OPAQUE_ID_FIELD_NAME, opaqueId); - } + LogEntryBuilder withThreadContext(ThreadContext threadContext) { + setThreadContextField(threadContext, AuditTrail.X_FORWARDED_FOR_HEADER, X_FORWARDED_FOR_FIELD_NAME); + setThreadContextField(threadContext, Task.X_OPAQUE_ID_HTTP_HEADER, OPAQUE_ID_FIELD_NAME); + setThreadContextField(threadContext, Task.TRACE_ID, TRACE_ID_FIELD_NAME); return this; } - LogEntryBuilder withXForwardedFor(ThreadContext threadContext) { - final String xForwardedFor = threadContext.getHeader(AuditTrail.X_FORWARDED_FOR_HEADER); - if (xForwardedFor != null) { - logEntry.with(X_FORWARDED_FOR_FIELD_NAME, xForwardedFor); + private void setThreadContextField(ThreadContext threadContext, String threadContextFieldName, String auditLogFieldName) { + final String fieldValue = threadContext.getHeader(threadContextFieldName); + if (fieldValue != null) { + logEntry.with(auditLogFieldName, fieldValue); } - return this; } LogEntryBuilder withAuthentication(Authentication authentication) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index c64195c569571..d4f76093f56f2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -325,6 +325,9 @@ public void init() throws Exception { if (randomBoolean()) { threadContext.putHeader(Task.X_OPAQUE_ID_HTTP_HEADER, randomAlphaOfLengthBetween(1, 4)); } + if (randomBoolean()) { + threadContext.putHeader(Task.TRACE_ID, randomAlphaOfLength(32)); + } if (randomBoolean()) { threadContext.putHeader( AuditTrail.X_FORWARDED_FOR_HEADER, @@ -1306,6 +1309,7 @@ public void testAnonymousAccessDeniedTransport() throws Exception { indicesRequest(request, checkedFields, checkedArrayFields); restOrTransportOrigin(request, threadContext, checkedFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1341,6 +1345,7 @@ public void testAnonymousAccessDeniedRest() throws Exception { checkedFields.put(LoggingAuditTrail.REQUEST_BODY_FIELD_NAME, expectedMessage); } opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map()); @@ -1373,6 +1378,7 @@ public void testAuthenticationFailed() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1400,6 +1406,7 @@ public void testAuthenticationFailedNoToken() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); 
indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1447,6 +1454,7 @@ public void testAuthenticationFailedRest() throws Exception { checkedFields.put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, "foo=bar"); } opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map()); @@ -1489,6 +1497,7 @@ public void testAuthenticationFailedRestNoToken() throws Exception { checkedFields.put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, "bar=baz"); } opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map()); @@ -1526,6 +1535,7 @@ public void testAuthenticationFailedRealm() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); } @@ -1570,6 +1580,7 @@ public void testAuthenticationFailedRealmRest() throws Exception { checkedFields.put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, "_param=baz"); } opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map()); } @@ -1595,6 +1606,7 @@ public void testAccessGranted() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1615,6 +1627,7 @@ public void testAccessGranted() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1719,6 +1732,7 @@ public void testSystemAccessGranted() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); clearLog(); @@ -1746,6 +1760,7 @@ public void testSystemAccessGranted() throws Exception { authentication(authentication, checkedFields); restOrTransportOrigin(request, threadContext, checkedFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); if (index != null) { checkedArrayFields.put(LoggingAuditTrail.INDICES_FIELD_NAME, new String[] { index }); @@ -1782,6 +1797,7 @@ public void testAccessGrantedInternalSystemAction() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, 
checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); } @@ -1806,6 +1822,7 @@ public void testAccessGrantedInternalSystemActionNonSystemUser() throws Exceptio restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1826,6 +1843,7 @@ public void testAccessGrantedInternalSystemActionNonSystemUser() throws Exceptio restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1863,6 +1881,7 @@ public void testAccessDenied() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1883,6 +1902,7 @@ public void testAccessDenied() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1922,6 +1942,7 @@ public void testTamperedRequestRest() throws Exception { checkedFields.put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, "_param=baz"); } opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map()); @@ -1949,6 +1970,7 @@ public void testTamperedRequest() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1978,6 +2000,7 @@ public void testTamperedRequestWithUser() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -1997,6 +2020,7 @@ public void testTamperedRequestWithUser() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -2028,6 +2052,7 @@ public void testConnectionDenied() throws Exception { .put(LoggingAuditTrail.TRANSPORT_PROFILE_FIELD_NAME, profile) .put(LoggingAuditTrail.RULE_FIELD_NAME, "deny _all"); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map()); @@ -2066,6 
+2091,7 @@ public void testConnectionGranted() throws Exception { .put(LoggingAuditTrail.TRANSPORT_PROFILE_FIELD_NAME, profile) .put(LoggingAuditTrail.RULE_FIELD_NAME, "allow default:accept_all"); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map()); } @@ -2097,6 +2123,7 @@ public void testRunAsGranted() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -2134,6 +2161,7 @@ public void testRunAsDenied() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -2185,6 +2213,7 @@ public void testAuthenticationSuccessRest() throws Exception { } authentication(authentication, checkedFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map()); @@ -2210,6 +2239,7 @@ public void testAuthenticationSuccessRest() throws Exception { } authentication(authentication, checkedFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map()); } @@ -2238,6 +2268,7 @@ public void testAuthenticationSuccessTransport() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); @@ -2257,6 +2288,7 @@ public void testAuthenticationSuccessTransport() throws Exception { restOrTransportOrigin(request, threadContext, checkedFields); indicesRequest(request, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.map(), checkedArrayFields.map()); } @@ -2664,16 +2696,31 @@ private static void authentication(Authentication authentication, MapBuilder checkedFields) { - final String opaqueId = threadContext.getHeader(Task.X_OPAQUE_ID_HTTP_HEADER); - if (opaqueId != null) { - checkedFields.put(LoggingAuditTrail.OPAQUE_ID_FIELD_NAME, opaqueId); - } + setFieldFromThreadContext(threadContext, checkedFields, Task.X_OPAQUE_ID_HTTP_HEADER, LoggingAuditTrail.OPAQUE_ID_FIELD_NAME); + } + + private static void traceId(ThreadContext threadContext, MapBuilder checkedFields) { + setFieldFromThreadContext(threadContext, checkedFields, Task.TRACE_ID, LoggingAuditTrail.TRACE_ID_FIELD_NAME); } private static void forwardedFor(ThreadContext threadContext, MapBuilder checkedFields) { - final String forwardedFor = threadContext.getHeader(AuditTrail.X_FORWARDED_FOR_HEADER); - if (forwardedFor != null) { - checkedFields.put(LoggingAuditTrail.X_FORWARDED_FOR_FIELD_NAME, forwardedFor); + setFieldFromThreadContext( + threadContext, + checkedFields, + 
AuditTrail.X_FORWARDED_FOR_HEADER, + LoggingAuditTrail.X_FORWARDED_FOR_FIELD_NAME + ); + } + + private static void setFieldFromThreadContext( + ThreadContext threadContext, + MapBuilder checkedFields, + String threadContextFieldName, + String logFieldName + ) { + final String value = threadContext.getHeader(threadContextFieldName); + if (value != null) { + checkedFields.put(logFieldName, value); } } From 002f506de8dba77f50442a418bd08cf187311a8b Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Tue, 25 Jan 2022 12:21:32 +0000 Subject: [PATCH 013/100] Construct dynamic updates directly via object builders (#81449) Currently, dynamic updates are built in the DocumentParser using a stack of possibly-dynamic object mappers. This logic, spread across a number of static methods, frequently assumes that the parents of a mapper can be found by splitting its name on dots, an assumption that will fail to hold once we allow objects to hold fields that have dots in their names. As a prerequisite for the dots in field names effort, this commit refactors the construction of dynamic updates into object mapper builders. Now, to build an update, we start with a new dynamic root builder, and then call addDynamic on it with each dynamically built mapper in turn. The builder will examine the mapper and see if it can just add it to its own set of mappers directly; and if not, it will retrieve or build an appropriate intermediate object mapper and recursively call addDynamic on it with the original mapper. As a side-effect of this change, ObjectMapper itself no longer updates its map of child mappers except during construction via merging, and so we can safely replace CopyOnWriteHashMap here. --- .../xcontent/DotExpandingXContentParser.java | 6 +- .../DotExpandingXContentParserTests.java | 8 + .../index/mapper/DocumentParser.java | 209 ++---------------- .../index/mapper/DocumentParserContext.java | 9 +- .../elasticsearch/index/mapper/Mapper.java | 6 + .../index/mapper/NestedObjectMapper.java | 10 + .../index/mapper/ObjectMapper.java | 92 ++++++-- .../index/mapper/RootObjectMapper.java | 38 ++-- .../index/mapper/DocumentParserTests.java | 130 +++++------ .../index/mapper/DynamicMappingTests.java | 5 +- .../FieldAliasMapperValidationTests.java | 2 +- .../index/mapper/MappingLookupTests.java | 2 +- .../index/mapper/MappingParserTests.java | 4 +- .../index/mapper/ObjectMapperMergeTests.java | 15 +- .../index/mapper/ObjectMapperTests.java | 2 +- .../query/SearchExecutionContextTests.java | 2 +- 16 files changed, 209 insertions(+), 331 deletions(-) diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/DotExpandingXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/DotExpandingXContentParser.java index d0318b49b62bc..cc7861bc6d752 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/DotExpandingXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/DotExpandingXContentParser.java @@ -52,7 +52,11 @@ private void expandDots() throws IOException { if (subpaths.length == 0) { throw new IllegalArgumentException("field name cannot contain only dots: [" + field + "]"); } - if (subpaths.length == 1) { + // Corner case: if the input has a single trailing '.', eg 'field.', then we will get a single + // subpath due to the way String.split() works. We can only return fast here if this is not + // the case + // TODO make this case throw an error instead?
https://github.com/elastic/elasticsearch/issues/28948 + if (subpaths.length == 1 && field.endsWith(".") == false) { return; } Token token = delegate().nextToken(); diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/DotExpandingXContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/DotExpandingXContentParserTests.java index a0c4bf5b4a6bb..e8e29660a4e2a 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/DotExpandingXContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/DotExpandingXContentParserTests.java @@ -53,6 +53,14 @@ public void testEmbeddedValue() throws IOException { } + public void testTrailingDotsAreStripped() throws IOException { + + assertXContentMatches(""" + {"test":{"with":{"dots":"value"}},"nodots":"value"}""", """ + {"test.":{"with.":{"dots":"value"}},"nodots":"value"}"""); + + } + public void testSkipChildren() throws IOException { XContentParser parser = DotExpandingXContentParser.expandDots(createParser(JsonXContent.jsonXContent, """ { "test.with.dots" : "value", "nodots" : "value2" }""")); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index e836b7ebd13dc..8d8945d8eb90d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -14,7 +14,6 @@ import org.apache.lucene.search.Query; import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.XContentHelper; @@ -33,7 +32,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; @@ -97,7 +95,7 @@ public ParsedDocument parseDocument(SourceToParse source, MappingLookup mappingL context.reorderParentAndGetDocs(), context.sourceToParse().source(), context.sourceToParse().getXContentType(), - createDynamicUpdate(mappingLookup, context.getDynamicMappers(), context.getDynamicRuntimeFields()) + createDynamicUpdate(context) ); } @@ -207,198 +205,20 @@ private static MapperParsingException wrapInMapperParsingException(SourceToParse return new MapperParsingException("failed to parse", e); } - private static String[] splitAndValidatePath(String fullFieldPath) { - if (fullFieldPath.contains(".")) { - String[] parts = fullFieldPath.split("\\."); - if (parts.length == 0) { - throw new IllegalArgumentException("field name cannot contain only dots"); - } - for (String part : parts) { - if (Strings.hasText(part) == false) { - // check if the field name contains only whitespace - if (Strings.isEmpty(part) == false) { - throw new IllegalArgumentException("object field cannot contain only whitespace: ['" + fullFieldPath + "']"); - } - throw new IllegalArgumentException( - "object field starting or ending with a [.] 
makes object resolution ambiguous: [" + fullFieldPath + "]" - ); - } - } - return parts; - } else { - if (Strings.isEmpty(fullFieldPath)) { - throw new IllegalArgumentException("field name cannot be an empty string"); - } - return new String[] { fullFieldPath }; - } - } - - /** - * Creates a Mapping containing any dynamically added fields, or returns null if there were no dynamic mappings. - */ - static Mapping createDynamicUpdate(MappingLookup mappingLookup, List dynamicMappers, List dynamicRuntimeFields) { - if (dynamicMappers.isEmpty() && dynamicRuntimeFields.isEmpty()) { + static Mapping createDynamicUpdate(DocumentParserContext context) { + if (context.getDynamicMappers().isEmpty() && context.getDynamicRuntimeFields().isEmpty()) { return null; } - RootObjectMapper root; - if (dynamicMappers.isEmpty() == false) { - root = createDynamicUpdate(mappingLookup, dynamicMappers); - root.fixRedundantIncludes(); - } else { - root = mappingLookup.getMapping().getRoot().copyAndReset(); - } - root.addRuntimeFields(dynamicRuntimeFields); - return mappingLookup.getMapping().mappingUpdate(root); - } - - private static RootObjectMapper createDynamicUpdate(MappingLookup mappingLookup, List dynamicMappers) { - - // We build a mapping by first sorting the mappers, so that all mappers containing a common prefix - // will be processed in a contiguous block. When the prefix is no longer seen, we pop the extra elements - // off the stack, merging them upwards into the existing mappers. - dynamicMappers.sort(Comparator.comparing(Mapper::name)); - Iterator dynamicMapperItr = dynamicMappers.iterator(); - List parentMappers = new ArrayList<>(); - Mapper firstUpdate = dynamicMapperItr.next(); - parentMappers.add(createUpdate(mappingLookup.getMapping().getRoot(), splitAndValidatePath(firstUpdate.name()), 0, firstUpdate)); - Mapper previousMapper = null; - while (dynamicMapperItr.hasNext()) { - Mapper newMapper = dynamicMapperItr.next(); - if (previousMapper != null && newMapper.name().equals(previousMapper.name())) { - // We can see the same mapper more than once, for example, if we had foo.bar and foo.baz, where - // foo did not yet exist. This will create 2 copies in dynamic mappings, which should be identical. - // Here we just skip over the duplicates, but we merge them to ensure there are no conflicts. - newMapper.merge(previousMapper); - continue; - } - previousMapper = newMapper; - String[] nameParts = splitAndValidatePath(newMapper.name()); - - // We first need the stack to only contain mappers in common with the previously processed mapper - // For example, if the first mapper processed was a.b.c, and we now have a.d, the stack will contain - // a.b, and we want to merge b back into the stack so it just contains a - int i = removeUncommonMappers(parentMappers, nameParts); - - // Then we need to add back mappers that may already exist within the stack, but are not on it. - // For example, if we processed a.b, followed by an object mapper a.c.d, and now are adding a.c.d.e - // then the stack will only have a on it because we will have already merged a.c.d into the stack. - // So we need to pull a.c, followed by a.c.d, onto the stack so e can be added to the end. - i = expandCommonMappers(parentMappers, nameParts, i); - - // If there are still parents of the new mapper which are not on the stack, we need to pull them - // from the existing mappings. 
In order to maintain the invariant that the stack only contains - // fields which are updated, we cannot simply add the existing mappers to the stack, since they - // may have other subfields which will not be updated. Instead, we pull the mapper from the existing - // mappings, and build an update with only the new mapper and its parents. This then becomes our - // "new mapper", and can be added to the stack. - if (i < nameParts.length - 1) { - newMapper = createExistingMapperUpdate(parentMappers, nameParts, i, mappingLookup, newMapper); - } - - if (newMapper instanceof ObjectMapper) { - parentMappers.add((ObjectMapper) newMapper); - } else { - addToLastMapper(parentMappers, newMapper, true); - } - } - popMappers(parentMappers, 1, true); - assert parentMappers.size() == 1; - return (RootObjectMapper) parentMappers.get(0); - } - - private static void popMappers(List parentMappers, int keepBefore, boolean merge) { - assert keepBefore >= 1; // never remove the root mapper - // pop off parent mappers not needed by the current mapper, - // merging them backwards since they are immutable - for (int i = parentMappers.size() - 1; i >= keepBefore; --i) { - addToLastMapper(parentMappers, parentMappers.remove(i), merge); + RootObjectMapper.Builder rootBuilder = context.updateRoot(); + for (Mapper mapper : context.getDynamicMappers()) { + rootBuilder.addDynamic(mapper.name(), null, mapper, context); } - } - - /** - * Adds a mapper as an update into the last mapper. If merge is true, the new mapper - * will be merged in with other child mappers of the last parent, otherwise it will be a new update. - */ - private static void addToLastMapper(List parentMappers, Mapper mapper, boolean merge) { - assert parentMappers.size() >= 1; - int lastIndex = parentMappers.size() - 1; - ObjectMapper withNewMapper = parentMappers.get(lastIndex).mappingUpdate(mapper); - if (merge) { - withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper); - } - parentMappers.set(lastIndex, withNewMapper); - } - - /** - * Removes mappers that exist on the stack, but are not part of the path of the current nameParts, - * Returns the next unprocessed index from nameParts. - */ - private static int removeUncommonMappers(List parentMappers, String[] nameParts) { - int keepBefore = 1; - while (keepBefore < parentMappers.size() && parentMappers.get(keepBefore).simpleName().equals(nameParts[keepBefore - 1])) { - ++keepBefore; + for (RuntimeField runtimeField : context.getDynamicRuntimeFields()) { + rootBuilder.addRuntimeField(runtimeField); } - popMappers(parentMappers, keepBefore, true); - return keepBefore - 1; - } - - /** - * Adds mappers from the end of the stack that exist as updates within those mappers. - * Returns the next unprocessed index from nameParts. - */ - private static int expandCommonMappers(List parentMappers, String[] nameParts, int i) { - ObjectMapper last = parentMappers.get(parentMappers.size() - 1); - while (i < nameParts.length - 1 && last.getMapper(nameParts[i]) != null) { - Mapper newLast = last.getMapper(nameParts[i]); - assert newLast instanceof ObjectMapper; - last = (ObjectMapper) newLast; - parentMappers.add(last); - ++i; - } - return i; - } - - /** - * Creates an update for intermediate object mappers that are not on the stack, but parents of newMapper. 
- */ - private static ObjectMapper createExistingMapperUpdate( - List parentMappers, - String[] nameParts, - int i, - MappingLookup mappingLookup, - Mapper newMapper - ) { - String updateParentName = nameParts[i]; - final ObjectMapper lastParent = parentMappers.get(parentMappers.size() - 1); - if (parentMappers.size() > 1) { - // only prefix with parent mapper if the parent mapper isn't the root (which has a fake name) - updateParentName = lastParent.name() + '.' + nameParts[i]; - } - ObjectMapper updateParent = mappingLookup.objectMappers().get(updateParentName); - assert updateParent != null : updateParentName + " doesn't exist"; - return createUpdate(updateParent, nameParts, i + 1, newMapper); - } - - /** - * Build an update for the parent which will contain the given mapper and any intermediate fields. - */ - private static ObjectMapper createUpdate(ObjectMapper parent, String[] nameParts, int i, Mapper mapper) { - List parentMappers = new ArrayList<>(); - ObjectMapper previousIntermediate = parent; - for (; i < nameParts.length - 1; ++i) { - Mapper intermediate = previousIntermediate.getMapper(nameParts[i]); - assert intermediate != null : "Field " + previousIntermediate.name() + " does not have a subfield " + nameParts[i]; - assert intermediate instanceof ObjectMapper; - parentMappers.add((ObjectMapper) intermediate); - previousIntermediate = (ObjectMapper) intermediate; - } - if (parentMappers.isEmpty() == false) { - // add the new mapper to the stack, and pop down to the original parent level - addToLastMapper(parentMappers, mapper, false); - popMappers(parentMappers, 1, false); - mapper = parentMappers.get(0); - } - return parent.mappingUpdate(mapper); + RootObjectMapper root = rootBuilder.build(MapperBuilderContext.ROOT); + root.fixRedundantIncludes(); + return context.mappingLookup().getMapping().mappingUpdate(root); } static void parseObjectOrNested(DocumentParserContext context, ObjectMapper mapper) throws IOException { @@ -453,6 +273,11 @@ private static void innerParseObject(DocumentParserContext context, ObjectMapper while (token != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = context.parser().currentName(); + if (currentFieldName.isBlank()) { + throw new MapperParsingException( + "Field name cannot contain only whitespace: [" + context.path().pathAsText(currentFieldName) + "]" + ); + } } else if (token == XContentParser.Token.START_OBJECT) { parseObject(context, mapper, currentFieldName); } else if (token == XContentParser.Token.START_ARRAY) { @@ -759,7 +584,7 @@ private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper parentMapper, parentMapper = context.mappingLookup().objectMappers().get(parentName); if (parentMapper == null) { // If parentMapper is null, it means the parent of the current mapper is being dynamically created right now - parentMapper = context.getObjectMapper(parentName); + parentMapper = context.getDynamicObjectMapper(parentName); if (parentMapper == null) { // it can still happen that the path is ambiguous and we are not able to locate the parent break; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 1dd7de4167da0..67247733e9e4e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -226,7 +226,7 @@ public final boolean 
isShadowed(String field) { return mappingLookup.isShadowed(field); } - public final ObjectMapper getObjectMapper(String name) { + public final ObjectMapper getDynamicObjectMapper(String name) { return dynamicObjectMappers.get(name); } @@ -250,6 +250,13 @@ public final List getDynamicRuntimeFields() { */ public abstract Iterable nonRootDocuments(); + /** + * @return a RootObjectMapper.Builder to be used to construct a dynamic mapping update + */ + public final RootObjectMapper.Builder updateRoot() { + return mappingLookup.getMapping().getRoot().newBuilder(indexSettings.getIndexVersionCreated()); + } + public boolean isWithinCopyTo() { return false; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 0ed4dc42478e2..c492cba3e6e4c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.common.Strings; import org.elasticsearch.xcontent.ToXContentFragment; import java.util.Map; @@ -65,4 +66,9 @@ public final String simpleName() { * @param mappers a {@link MappingLookup} that can produce references to other mappers */ public abstract void validate(MappingLookup mappers); + + @Override + public String toString() { + return Strings.toString(this); + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index e4ba0fc664760..87370aa704f01 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -137,6 +137,16 @@ public Map getChildren() { return Collections.unmodifiableMap(this.mappers); } + @Override + public ObjectMapper.Builder newBuilder(Version indexVersionCreated) { + NestedObjectMapper.Builder builder = new NestedObjectMapper.Builder(simpleName(), indexVersionCreated); + builder.enabled = enabled; + builder.dynamic = dynamic; + builder.includeInRoot = includeInRoot; + builder.includeInParent = includeInParent; + return builder; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(simpleName()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 4932ef0742418..171d163d70ab1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -9,8 +9,8 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; -import org.elasticsearch.common.collect.CopyOnWriteHashMap; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -87,6 +87,63 @@ public Builder add(Mapper.Builder builder) { return this; } + Builder addMappers(Map mappers) { + mappers.forEach((name, mapper) -> mappersBuilders.add(new Mapper.Builder(name) { + @Override + public Mapper build(MapperBuilderContext context) { + return mapper; + } + })); + return this; + } + + /** + * Adds a dynamically created Mapper to 
this builder. + * + * @param name the name of the Mapper, including object prefixes + * @param prefix the object prefix of this mapper + * @param mapper the mapper to add + * @param context the DocumentParserContext in which the mapper has been built + */ + public void addDynamic(String name, String prefix, Mapper mapper, DocumentParserContext context) { + // If the mapper to add has no dots and is therefore + // a leaf mapper, we just add it here + if (name.contains(".") == false) { + mappersBuilders.add(new Mapper.Builder(name) { + @Override + public Mapper build(MapperBuilderContext context) { + return mapper; + } + }); + } + // otherwise we strip off the first object path of the mapper name, load or create + // the relevant object mapper, and then recurse down into it, passing the remainder + // of the mapper name. So for a mapper 'foo.bar.baz', we locate 'foo' and then + // call addDynamic on it with the name 'bar.baz'. + else { + int firstDotIndex = name.indexOf("."); + String childName = name.substring(0, firstDotIndex); + String fullChildName = prefix == null ? childName : prefix + "." + childName; + ObjectMapper.Builder childBuilder = findChild(childName, fullChildName, context); + childBuilder.addDynamic(name.substring(firstDotIndex + 1), fullChildName, mapper, context); + mappersBuilders.add(childBuilder); + } + } + + private ObjectMapper.Builder findChild(String childName, String fullChildName, DocumentParserContext context) { + // does the child mapper already exist? if so, use that + ObjectMapper child = context.mappingLookup().objectMappers().get(fullChildName); + if (child != null) { + return child.newBuilder(context.indexSettings().getIndexVersionCreated()); + } + // has the child mapper been added as a dynamic update already? + child = context.getDynamicObjectMapper(fullChildName); + if (child != null) { + return child.newBuilder(context.indexSettings().getIndexVersionCreated()); + } + throw new IllegalArgumentException("Missing intermediate object " + fullChildName); + } + public Optional getBuilder(String name) { return mappersBuilders.stream().filter(b -> b.name().equals(name)).findFirst(); } @@ -237,10 +294,9 @@ protected static void parseProperties( private final String fullPath; protected Explicit enabled; - protected volatile Dynamic dynamic; - protected volatile CopyOnWriteHashMap mappers; + protected Map mappers; ObjectMapper(String name, String fullPath, Explicit enabled, Dynamic dynamic, Map mappers) { super(name); @@ -251,9 +307,9 @@ protected static void parseProperties( this.enabled = enabled; this.dynamic = dynamic; if (mappers == null) { - this.mappers = new CopyOnWriteHashMap<>(); + this.mappers = new HashMap<>(); } else { - this.mappers = CopyOnWriteHashMap.copyOf(mappers); + this.mappers = new HashMap<>(mappers); } } @@ -265,23 +321,18 @@ protected ObjectMapper clone() { } catch (CloneNotSupportedException e) { throw new RuntimeException(e); } + clone.mappers = new HashMap<>(clone.mappers); return clone; } - ObjectMapper copyAndReset() { - ObjectMapper copy = clone(); - // reset the sub mappers - copy.mappers = new CopyOnWriteHashMap<>(); - return copy; - } - /** - * Build a mapping update with the provided sub mapping update. 
+ * @return a Builder that will produce an empty ObjectMapper with the same configuration as this one */ - final ObjectMapper mappingUpdate(Mapper mapper) { - ObjectMapper mappingUpdate = copyAndReset(); - mappingUpdate.putMapper(mapper); - return mappingUpdate; + public ObjectMapper.Builder newBuilder(Version indexVersionCreated) { + ObjectMapper.Builder builder = new ObjectMapper.Builder(simpleName()); + builder.enabled = this.enabled; + builder.dynamic = this.dynamic; + return builder; } @Override @@ -306,10 +357,6 @@ public Mapper getMapper(String field) { return mappers.get(field); } - protected void putMapper(Mapper mapper) { - mappers = mappers.copyAndPut(mapper.simpleName(), mapper); - } - @Override public Iterator iterator() { return mappers.values().iterator(); @@ -387,7 +434,7 @@ protected void doMerge(final ObjectMapper mergeWith, MergeReason reason) { merged = mergeIntoMapper.merge(mergeWithMapper); } } - putMapper(merged); + mappers.put(merged.simpleName(), merged); } } @@ -409,7 +456,6 @@ void toXContent(XContentBuilder builder, Params params, ToXContent custom) throw if (isEnabled() != Defaults.ENABLED) { builder.field("enabled", enabled.value()); } - if (custom != null) { custom.toXContent(builder, params); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 124d063b38a7b..dc1420bb0f9c7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -67,9 +67,9 @@ public static class Builder extends ObjectMapper.Builder { protected Explicit dynamicTemplates = Defaults.DYNAMIC_TEMPLATES; protected Explicit dynamicDateTimeFormatters = Defaults.DYNAMIC_DATE_TIME_FORMATTERS; + protected final Map runtimeFields = new HashMap<>(); protected Explicit dateDetection = Defaults.DATE_DETECTION; protected Explicit numericDetection = Defaults.NUMERIC_DETECTION; - protected Map runtimeFields; public Builder(String name) { super(name); @@ -91,8 +91,13 @@ public RootObjectMapper.Builder add(Mapper.Builder builder) { return this; } - public RootObjectMapper.Builder setRuntime(Map runtimeFields) { - this.runtimeFields = runtimeFields; + public RootObjectMapper.Builder addRuntimeField(RuntimeField runtimeField) { + this.runtimeFields.put(runtimeField.name(), runtimeField); + return this; + } + + public RootObjectMapper.Builder addRuntimeFields(Map runtimeFields) { + this.runtimeFields.putAll(runtimeFields); return this; } @@ -103,7 +108,7 @@ public RootObjectMapper build(MapperBuilderContext context) { enabled, dynamic, buildMappers(true, context), - runtimeFields == null ? 
Collections.emptyMap() : runtimeFields, + runtimeFields, dynamicDateTimeFormatters, dynamicTemplates, dateDetection, @@ -228,7 +233,7 @@ private boolean processField( parserContext, true ); - builder.setRuntime(fields); + builder.addRuntimeFields(fields); return true; } else { throw new ElasticsearchParseException("runtime must be a map type"); @@ -271,18 +276,11 @@ protected ObjectMapper clone() { } @Override - RootObjectMapper copyAndReset() { - RootObjectMapper copy = (RootObjectMapper) super.copyAndReset(); - // for dynamic updates, no need to carry root-specific options, we just - // set everything to their implicit default value so that they are not - // applied at merge time - copy.dynamicTemplates = Defaults.DYNAMIC_TEMPLATES; - copy.dynamicDateTimeFormatters = Defaults.DYNAMIC_DATE_TIME_FORMATTERS; - copy.dateDetection = Defaults.DATE_DETECTION; - copy.numericDetection = Defaults.NUMERIC_DETECTION; - // also no need to carry the already defined runtime fields, only new ones need to be added - copy.runtimeFields.clear(); - return copy; + public RootObjectMapper.Builder newBuilder(Version indexVersionCreated) { + RootObjectMapper.Builder builder = new RootObjectMapper.Builder(name()); + builder.enabled = enabled; + builder.dynamic = dynamic; + return builder; } /** @@ -368,12 +366,6 @@ protected void doMerge(ObjectMapper mergeWith, MergeReason reason) { } } - void addRuntimeFields(Collection runtimeFields) { - for (RuntimeField runtimeField : runtimeFields) { - this.runtimeFields.put(runtimeField.name(), runtimeField); - } - } - @Override protected void doXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { final boolean includeDefaults = params.paramAsBoolean("include_defaults", false); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index a93528357aa7a..c115f84da43e4 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -33,7 +33,6 @@ import java.math.BigDecimal; import java.math.BigInteger; import java.nio.charset.StandardCharsets; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -582,7 +581,22 @@ DocumentMapper createDummyMapping() throws Exception { } MapperService createMapperService() throws Exception { - return createMapperService(mapping(b -> { + return createMapperService(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + { + b.startObject("test"); + { + b.field("match", "runtime*"); + b.startObject("runtime").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endArray(); + b.startObject("properties"); b.startObject("y").field("type", "object").endObject(); b.startObject("x"); { @@ -602,45 +616,37 @@ MapperService createMapperService() throws Exception { b.endObject(); } b.endObject(); + b.endObject(); })); } - // creates an object mapper, which is about 100x harder than it should be.... 
- private static ObjectMapper createObjectMapper(String name) { - ContentPath path = new ContentPath(0); - String[] nameParts = name.split("\\."); - for (int i = 0; i < nameParts.length - 1; ++i) { - path.add(nameParts[i]); - } - return new ObjectMapper.Builder(nameParts[nameParts.length - 1]).enabled(true).build(MapperBuilderContext.forPath(path)); - } - public void testEmptyMappingUpdate() throws Exception { DocumentMapper docMapper = createDummyMapping(); - assertNull(DocumentParser.createDynamicUpdate(docMapper.mappers(), Collections.emptyList(), Collections.emptyList())); + ParsedDocument doc = docMapper.parse(source(b -> {})); + assertNull(doc.dynamicMappingsUpdate()); } public void testSingleMappingUpdate() throws Exception { DocumentMapper docMapper = createDummyMapping(); - List updates = Collections.singletonList(new MockFieldMapper("foo")); - Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mappers(), updates, Collections.emptyList()); + ParsedDocument doc = docMapper.parse(source(b -> b.field("foo", 10))); + Mapping mapping = doc.dynamicMappingsUpdate(); assertNotNull(mapping); assertNotNull(mapping.getRoot().getMapper("foo")); } public void testSingleRuntimeFieldMappingUpdate() throws Exception { DocumentMapper docMapper = createDummyMapping(); - List updates = Collections.singletonList(new TestRuntimeField("foo", "any")); - Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mappers(), Collections.emptyList(), updates); + ParsedDocument doc = docMapper.parse(source(b -> b.field("runtime-field", "10"))); + Mapping mapping = doc.dynamicMappingsUpdate(); assertNotNull(mapping); - assertNull(mapping.getRoot().getMapper("foo")); - assertNotNull(mapping.getRoot().getRuntimeField("foo")); + assertNull(mapping.getRoot().getMapper("runtime-field")); + assertNotNull(mapping.getRoot().getRuntimeField("runtime-field")); } public void testSubfieldMappingUpdate() throws Exception { DocumentMapper docMapper = createDummyMapping(); - List updates = Collections.singletonList(new MockFieldMapper("x.foo")); - Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mappers(), updates, Collections.emptyList()); + ParsedDocument doc = docMapper.parse(source(b -> b.field("x.foo", 10))); + Mapping mapping = doc.dynamicMappingsUpdate(); assertNotNull(mapping); Mapper xMapper = mapping.getRoot().getMapper("x"); assertNotNull(xMapper); @@ -650,21 +656,22 @@ public void testSubfieldMappingUpdate() throws Exception { } public void testRuntimeSubfieldMappingUpdate() throws Exception { - DocumentMapper docMapper = createDummyMapping(); - List updates = Collections.singletonList(new TestRuntimeField("x.foo", "any")); - Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mappers(), Collections.emptyList(), updates); + DocumentMapper docMapper = createDocumentMapper(topMapping(b -> b.field("dynamic", "runtime"))); + ParsedDocument doc = docMapper.parse(source(b -> b.field("runtime.foo", 10))); + Mapping mapping = doc.dynamicMappingsUpdate(); assertNotNull(mapping); - Mapper xMapper = mapping.getRoot().getMapper("x"); + Mapper xMapper = mapping.getRoot().getMapper("runtime"); assertNull(xMapper); - assertNotNull(mapping.getRoot().getRuntimeField("x.foo")); + assertNotNull(mapping.getRoot().getRuntimeField("runtime.foo")); } public void testMultipleSubfieldMappingUpdate() throws Exception { DocumentMapper docMapper = createDummyMapping(); - List updates = new ArrayList<>(); - updates.add(new MockFieldMapper("x.foo")); - updates.add(new MockFieldMapper("x.bar")); - 
Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mappers(), updates, Collections.emptyList()); + ParsedDocument doc = docMapper.parse(source(b -> { + b.field("x.foo", 10); + b.field("x.bar", 20); + })); + Mapping mapping = doc.dynamicMappingsUpdate(); assertNotNull(mapping); Mapper xMapper = mapping.getRoot().getMapper("x"); assertNotNull(xMapper); @@ -676,8 +683,8 @@ public void testMultipleSubfieldMappingUpdate() throws Exception { public void testDeepSubfieldMappingUpdate() throws Exception { DocumentMapper docMapper = createDummyMapping(); - List updates = Collections.singletonList(new MockFieldMapper("x.subx.foo")); - Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mappers(), updates, Collections.emptyList()); + ParsedDocument doc = docMapper.parse(source(b -> b.field("x.subx.foo", 10))); + Mapping mapping = doc.dynamicMappingsUpdate(); assertNotNull(mapping); Mapper xMapper = mapping.getRoot().getMapper("x"); assertNotNull(xMapper); @@ -690,10 +697,11 @@ public void testDeepSubfieldMappingUpdate() throws Exception { public void testDeepSubfieldAfterSubfieldMappingUpdate() throws Exception { DocumentMapper docMapper = createDummyMapping(); - List updates = new ArrayList<>(); - updates.add(new MockFieldMapper("x.a")); - updates.add(new MockFieldMapper("x.subx.b")); - Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mappers(), updates, Collections.emptyList()); + ParsedDocument doc = docMapper.parse(source(b -> { + b.field("x.a", 10); + b.field("x.subx.b", 10); + })); + Mapping mapping = doc.dynamicMappingsUpdate(); assertNotNull(mapping); Mapper xMapper = mapping.getRoot().getMapper("x"); assertNotNull(xMapper); @@ -707,12 +715,15 @@ public void testDeepSubfieldAfterSubfieldMappingUpdate() throws Exception { public void testObjectMappingUpdate() throws Exception { MapperService mapperService = createMapperService(); DocumentMapper docMapper = mapperService.documentMapper(); - List updates = new ArrayList<>(); - updates.add(createObjectMapper("foo")); - updates.add(createObjectMapper("foo.bar")); - updates.add(new MockFieldMapper("foo.bar.baz")); - updates.add(new MockFieldMapper("foo.field")); - Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mappers(), updates, Collections.emptyList()); + ParsedDocument doc = docMapper.parse(source(b -> { + b.startObject("foo"); + b.startObject("bar"); + b.field("baz", 10); + b.endObject(); + b.field("field", 10); + b.endObject(); + })); + Mapping mapping = doc.dynamicMappingsUpdate(); assertNotNull(mapping); Mapper fooMapper = mapping.getRoot().getMapper("foo"); assertNotNull(fooMapper); @@ -1772,40 +1783,17 @@ public void testDynamicDateDetectionEnabledWithNoSpecialCharacters() throws IOEx public void testDynamicFieldsStartingAndEndingWithDot() throws Exception { MapperService mapperService = createMapperService(mapping(b -> {})); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> mapperService.documentMapper().parse(source(b -> { - b.startArray("top."); - { - b.startObject(); - { - b.startArray("foo."); - { - b.startObject(); - { - b.startObject("bar."); - { - b.startObject("aoeu").field("a", 1).field("b", 2).endObject(); - } - b.endObject(); - } - b.endObject(); - } - b.endArray(); - } - b.endObject(); - } - b.endArray(); - }))); + Exception e = expectThrows(MapperParsingException.class, () -> mapperService.documentMapper().parse(source(""" + {"top..foo.":{"a":1}} + """))); - assertThat( - e.getMessage(), - containsString("object field starting or ending with 
a [.] makes object resolution ambiguous: [top..foo.]") - ); + assertThat(e.getCause().getMessage(), containsString("object field cannot contain only whitespace: ['top..foo.']")); } public void testDynamicFieldsEmptyName() throws Exception { DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); - IllegalArgumentException emptyFieldNameException = expectThrows(IllegalArgumentException.class, () -> mapper.parse(source(b -> { + Exception emptyFieldNameException = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> { b.startArray("top"); { b.startObject(); @@ -1817,7 +1805,7 @@ public void testDynamicFieldsEmptyName() throws Exception { b.endArray(); }))); - assertThat(emptyFieldNameException.getMessage(), containsString("object field cannot contain only whitespace: ['top.aoeu. ']")); + assertThat(emptyFieldNameException.getMessage(), containsString("Field name cannot contain only whitespace: [top.aoeu. ]")); } public void testBlankFieldNames() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index f7a286f49aea1..b67b8c7bfc351 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -330,8 +330,8 @@ public void testIntroduceTwoFields() throws Exception { } public void testObject() throws Exception { - DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); - ParsedDocument doc = mapper.parse(source(b -> { + MapperService mapperService = createMapperService(mapping(b -> {})); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { b.startObject("foo"); { b.startObject("bar").field("baz", "foo").endObject(); @@ -340,6 +340,7 @@ public void testObject() throws Exception { })); assertNotNull(doc.dynamicMappingsUpdate()); + merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); assertThat(Strings.toString(doc.dynamicMappingsUpdate()), containsString(""" {"foo":{"properties":{"bar":{"properties":{"baz":{"type":"text\"""")); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index 11c1b08063cc1..c4eccad926123 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -177,7 +177,7 @@ private static MappingLookup createMappingLookup( ) { RootObjectMapper.Builder builder = new RootObjectMapper.Builder("_doc"); Map runtimeFieldTypes = runtimeFields.stream().collect(Collectors.toMap(RuntimeField::name, r -> r)); - builder.setRuntime(runtimeFieldTypes); + builder.addRuntimeFields(runtimeFieldTypes); Mapping mapping = new Mapping(builder.build(MapperBuilderContext.ROOT), new MetadataFieldMapper[0], Collections.emptyMap()); return MappingLookup.fromMappers(mapping, fieldMappers, objectMappers, fieldAliasMappers); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java index 427752a5c6339..32678e3aeea5c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java @@ -41,7 +41,7 @@ private 
static MappingLookup createMappingLookup( ) { RootObjectMapper.Builder builder = new RootObjectMapper.Builder("_doc"); Map runtimeFieldTypes = runtimeFields.stream().collect(Collectors.toMap(RuntimeField::name, r -> r)); - builder.setRuntime(runtimeFieldTypes); + builder.addRuntimeFields(runtimeFieldTypes); Mapping mapping = new Mapping(builder.build(MapperBuilderContext.ROOT), new MetadataFieldMapper[0], Collections.emptyMap()); return MappingLookup.fromMappers(mapping, fieldMappers, objectMappers, emptyList()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 32caede9f2189..199d9df5aae7f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -63,7 +63,7 @@ private static MappingParser createMappingParser(Settings settings) { ); } - public void testFieldNameWithDots() throws Exception { + public void testFieldNameWithDotsDisallowed() throws Exception { XContentBuilder builder = mapping(b -> { b.startObject("foo.bar").field("type", "text").endObject(); b.startObject("foo.baz").field("type", "keyword").endObject(); @@ -97,7 +97,7 @@ public void testFieldNameWithDeepDots() throws Exception { assertNotNull(mappingLookup.objectMappers().get("foo")); } - public void testFieldNameWithDotsConflict() throws IOException { + public void testFieldNameWithDotPrefixDisallowed() throws IOException { XContentBuilder builder = mapping(b -> { b.startObject("foo").field("type", "text").endObject(); b.startObject("foo.baz").field("type", "keyword").endObject(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index 0c6cb81015177..b2d2e7bfdd392 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -105,7 +105,7 @@ public void testMergeDisabledRootMapper() { final RootObjectMapper rootObjectMapper = (RootObjectMapper) new RootObjectMapper.Builder(type).enabled(false) .build(MapperBuilderContext.ROOT); // the root is disabled, and we are not trying to re-enable it, but we do want to be able to add runtime fields - final RootObjectMapper mergeWith = new RootObjectMapper.Builder(type).setRuntime( + final RootObjectMapper mergeWith = new RootObjectMapper.Builder(type).addRuntimeFields( Collections.singletonMap("test", new TestRuntimeField("test", "long")) ).build(MapperBuilderContext.ROOT); @@ -133,20 +133,11 @@ public void testMergeNested() { } private static RootObjectMapper createRootObjectMapper(String name, boolean enabled, Map mappers) { - final RootObjectMapper rootObjectMapper = (RootObjectMapper) new RootObjectMapper.Builder(name).enabled(enabled) - .build(MapperBuilderContext.ROOT); - - mappers.values().forEach(rootObjectMapper::putMapper); - - return rootObjectMapper; + return (RootObjectMapper) new RootObjectMapper.Builder(name).enabled(enabled).addMappers(mappers).build(MapperBuilderContext.ROOT); } private static ObjectMapper createObjectMapper(String name, boolean enabled, Map mappers) { - final ObjectMapper mapper = new ObjectMapper.Builder(name).enabled(enabled).build(MapperBuilderContext.ROOT); - - mappers.values().forEach(mapper::putMapper); - - return mapper; + return new 
ObjectMapper.Builder(name).enabled(enabled).addMappers(mappers).build(MapperBuilderContext.ROOT); } private TextFieldMapper createTextFieldMapper(String name) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 67ae6f0eb2e8a..14bd9baba3225 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -42,7 +42,7 @@ public void testDifferentInnerObjectTokenFailure() throws Exception { "value":"value" }""".indent(1)), XContentType.JSON)) ); - assertTrue(e.getMessage(), e.getMessage().contains("cannot be changed from type")); + assertThat(e.getMessage(), containsString("can't merge a non object mapping [object.array.object] with an object mapping")); } public void testEmptyArrayProperties() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 9a7dce9a4f35b..8af1ff17c6f2c 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -328,7 +328,7 @@ private static MappingLookup createMappingLookup(List concreteF List mappers = concreteFields.stream().map(MockFieldMapper::new).collect(Collectors.toList()); RootObjectMapper.Builder builder = new RootObjectMapper.Builder("_doc"); Map runtimeFieldTypes = runtimeFields.stream().collect(Collectors.toMap(RuntimeField::name, r -> r)); - builder.setRuntime(runtimeFieldTypes); + builder.addRuntimeFields(runtimeFieldTypes); Mapping mapping = new Mapping(builder.build(MapperBuilderContext.ROOT), new MetadataFieldMapper[0], Collections.emptyMap()); return MappingLookup.fromMappers(mapping, mappers, Collections.emptyList(), Collections.emptyList()); } From b9aebc8d64d5a45587db32f6fc775ca36b3378a5 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 25 Jan 2022 07:39:28 -0500 Subject: [PATCH 014/100] [ML] fix NLP tokenization never_split handling around punctuation (#82982) When multiple characters in a row might be part of the never_split we erroneously tokenized them. 
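For example (a hypothetical sketch, reusing the `tokenizer` and `tokenStrings` helper names from the
tests below, with `never_split` containing `[UNK]`):

    // before this change the leading "[" was wrongly consumed while matching the never-split token
    List<DelimitedToken> tokens = tokenizer.tokenize("Hello~[[UNK]");
    assertThat(tokenStrings(tokens), contains("Hello", "~", "[", "[UNK]"));  // failed prior to this fix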
This commit handles this scenario so that `[[UNK]` is now tokenized as `[`, `[UNK]`.
---
 .../xpack/ml/inference/nlp/tokenizers/BasicTokenizer.java | 8 +++++++-
 .../ml/inference/nlp/tokenizers/BasicTokenizerTests.java  | 5 +++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenizer.java
index 789710cefbfb2..561fd429422bf 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenizer.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenizer.java
@@ -128,7 +128,13 @@ private List<DelimitedToken> mergeNeverSplitTokens(String originalText, List<De
                 matchingTokens = new ArrayList<>();
                 current = neverSplitTokenTrieRoot;
             }
-            mergedTokens.add(token);
+            childNode = current.getChild(token.getToken());
+            if (childNode == null) {
+                mergedTokens.add(token);
+            } else {
+                matchingTokens.add(token);
+                current = childNode;
+            }
         } else if (childNode.isLeaf()) {
             matchingTokens.add(token);
             DelimitedToken mergedToken = DelimitedToken.mergeTokens(matchingTokens);
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenizerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenizerTests.java
index 0e08f31989a90..effe3be0da5a6 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenizerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenizerTests.java
@@ -79,6 +79,11 @@ public void testNeverSplit_GivenNoLowerCase() {
         assertThat(tokenStrings(tokens), contains("Hello", "-", "[UNK]"));
         tokens = tokenizer.tokenize("Hello~[UNK][UNK]");
         assertThat(tokenStrings(tokens), contains("Hello", "~", "[UNK]", "[UNK]"));
+        assertThat(tokenStrings(tokenizer.tokenize("Hello~[[UNK]")), contains("Hello", "~", "[", "[UNK]"));
+        assertThat(tokenStrings(tokenizer.tokenize("Hello~[[[UNK]")), contains("Hello", "~", "[", "[", "[UNK]"));
+        assertThat(tokenStrings(tokenizer.tokenize("Hello~[UNK]]")), contains("Hello", "~", "[UNK]", "]"));
+        assertThat(tokenStrings(tokenizer.tokenize("Hello~[UNK]]]")), contains("Hello", "~", "[UNK]", "]", "]"));
+        assertThat(tokenStrings(tokenizer.tokenize("Hello~[[UNK]]")), contains("Hello", "~", "[", "[UNK]", "]"));
         tokens = tokenizer.tokenize("Hello-[unk]");
         assertThat(tokenStrings(tokens), contains("Hello", "-", "[", "unk", "]"));
     }

From a5affc7104707033518c1070ec446a4955ede974 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?=
Date: Tue, 25 Jan 2022 15:28:06 +0100
Subject: [PATCH 015/100] [DOCS] Fixes field names in ML sum functions.
 (#83048)

---
 .../anomaly-detection/functions/ml-count-functions.asciidoc  | 4 ++--
 .../ml/anomaly-detection/functions/ml-sum-functions.asciidoc | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc
index b4ac6dc1c14f7..2ef14025986fe 100644
--- a/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc
+++ b/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc
@@ -281,5 +281,5 @@ PUT _ml/anomaly_detectors/example7
 This example detects instances of port scanning.
When you use this function in a detector in your {anomaly-job}, it models the distinct count of ports. It also -detects the `src_ip` values that connect to an unusually high number of different -`dst_ports` values compared to other `src_ip` values. +detects the `src_ip` values that connect to an unusually high number of +different `dst_ports` values compared to other `src_ip` values. diff --git a/docs/reference/ml/anomaly-detection/functions/ml-sum-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-sum-functions.asciidoc index 398bdba30075c..423a00154fe88 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-sum-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-sum-functions.asciidoc @@ -101,8 +101,8 @@ is not applicable for this function. -------------------------------------------------- { "function" : "high_non_null_sum", - "fieldName" : "amount_approved", - "byFieldName" : "employee" + "field_name" : "amount_approved", + "by_field_name" : "employee" } -------------------------------------------------- // NOTCONSOLE From 27afb6fb3a52d729fdad4369176df09d6e82baf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Tue, 25 Jan 2022 15:30:44 +0100 Subject: [PATCH 016/100] [Transform] Fix condition on which the transform stops processing buckets (#82852) --- docs/changelog/82852.yaml | 5 ++ .../integration/TransformPivotRestIT.java | 6 +- .../TransformPivotRestSpecialCasesIT.java | 67 +++++++++++++++++++ .../integration/TransformRestTestCase.java | 7 +- .../TransformTaskFailedStateIT.java | 4 +- .../common/AbstractCompositeAggFunction.java | 2 +- .../transforms/pivot/PivotTests.java | 50 ++++++++++++++ 7 files changed, 132 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/82852.yaml diff --git a/docs/changelog/82852.yaml b/docs/changelog/82852.yaml new file mode 100644 index 0000000000000..8a393b6ed0212 --- /dev/null +++ b/docs/changelog/82852.yaml @@ -0,0 +1,5 @@ +pr: 82852 +summary: Fix condition on which the transform stops processing buckets +area: Transform +type: bug +issues: [] diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java index fba609aa88e89..25f912fc52f8d 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java @@ -98,7 +98,7 @@ public void testSimplePivot() throws Exception { public void testSimpleDataStreamPivot() throws Exception { String indexName = "reviews_data_stream"; - createReviewsIndex(indexName, 1000, "date", true, -1, null); + createReviewsIndex(indexName, 1000, 27, "date", true, -1, null); String transformId = "simple_data_stream_pivot"; String transformIndex = "pivot_reviews_data_stream"; setupDataAccessRole(DATA_ACCESS_ROLE, indexName, transformIndex); @@ -363,7 +363,7 @@ public void testBucketSelectorPivot() throws Exception { public void testContinuousPivot() throws Exception { String indexName = "continuous_reviews"; - createReviewsIndex(indexName, 1000, "date", false, 5, "user_id"); + createReviewsIndex(indexName, 1000, 27, "date", false, 5, "user_id"); String transformId = "simple_continuous_pivot"; 
 String transformIndex = "pivot_reviews_continuous";
         setupDataAccessRole(DATA_ACCESS_ROLE, indexName, transformIndex);
@@ -1283,7 +1283,7 @@ public void testPivotWithGeoBoundsAgg() throws Exception {
         String indexName = "reviews_geo_bounds";
 
         // gh#71874 regression test: create some sparse data
-        createReviewsIndex(indexName, 1000, "date", false, 5, "location");
+        createReviewsIndex(indexName, 1000, 27, "date", false, 5, "location");
 
         setupDataAccessRole(DATA_ACCESS_ROLE, indexName, transformIndex);
 
diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestSpecialCasesIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestSpecialCasesIT.java
index 24e5c13b48201..15d556998ab3d 100644
--- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestSpecialCasesIT.java
+++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestSpecialCasesIT.java
@@ -22,6 +22,7 @@
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
 
 public class TransformPivotRestSpecialCasesIT extends TransformRestTestCase {
     private static boolean indicesCreated = false;
@@ -235,4 +236,70 @@ public void testSparseDataPercentiles() throws Exception {
         assertTrue(percentilesEmpty.containsKey("99"));
         assertNull(percentilesEmpty.get("99"));
     }
+
+    /**
+     * This test verifies that, regardless of the max_page_search_size value used, the transform works correctly in the face of a
+     * restrictive bucket selector.
+     * In the past, when a composite aggregation page contained no buckets (because the bucket selector filtered them all out) and
+     * max_page_search_size was small enough, the transform stopped prematurely.
+     * The problem was fixed by https://github.com/elastic/elasticsearch/pull/82852 and this test serves as a regression test for that PR.
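+     * A sketch of the pathological page shape the fix must tolerate (hypothetical values):
+     *   page 1: buckets = [], afterKey = {user_id=u_42}   (every bucket removed by the bucket selector)
+     *   page 2: buckets = [...], afterKey = ...
+     * Processing must continue past page 1; only a null afterKey marks the end of the composite aggregation.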
+ */ + public void testRestrictiveBucketSelector() throws Exception { + String indexName = "special_pivot_bucket_selector_reviews"; + createReviewsIndex(indexName, 1000, 327, "date", false, 5, "affiliate_id"); + + verifyDestIndexHitsCount(indexName, "special_pivot_bucket_selector-10", 10, 14); + verifyDestIndexHitsCount(indexName, "special_pivot_bucket_selector-10000", 10000, 14); + } + + private void verifyDestIndexHitsCount(String sourceIndex, String transformId, int maxPageSearchSize, long expectedDestIndexCount) + throws Exception { + String transformIndex = transformId; + String config = """ + { + "source": { + "index": "%s" + }, + "dest": { + "index": "%s" + }, + "frequency": "1m", + "pivot": { + "group_by": { + "user_id": { + "terms": { + "field": "user_id" + } + } + }, + "aggregations": { + "stars_sum": { + "sum": { + "field": "stars" + } + }, + "bs": { + "bucket_selector": { + "buckets_path": { + "stars_sum": "stars_sum.value" + }, + "script": "params.stars_sum > 20" + } + } + } + }, + "settings": { + "max_page_search_size": %s + } + }""".formatted(sourceIndex, transformIndex, maxPageSearchSize); + Request createTransformRequest = new Request("PUT", getTransformEndpoint() + transformId); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + startAndWaitForTransform(transformId, transformIndex); + assertTrue(indexExists(transformIndex)); + Map searchResult = getAsMap(transformIndex + "/_search"); + long count = (Integer) XContentMapValues.extractValue("hits.total.value", searchResult); + assertThat(count, is(equalTo(expectedDestIndexCount))); + } } diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java index 0d82ff9cceaf3..a3b87ac0f0caf 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java @@ -58,6 +58,7 @@ protected Settings restClientSettings() { protected void createReviewsIndex( String indexName, int numDocs, + int numUsers, String dateType, boolean isDataStream, int userWithMissingBuckets, @@ -75,7 +76,7 @@ protected void createReviewsIndex( bulk.append(""" {"create":{"_index":"%s"}} """.formatted(indexName)); - long user = Math.round(Math.pow(i * 31 % 1000, distributionTable[i % distributionTable.length]) % 27); + long user = Math.round(Math.pow(i * 31 % 1000, distributionTable[i % distributionTable.length]) % numUsers); int stars = distributionTable[(i * 33) % distributionTable.length]; long business = Math.round(Math.pow(user * stars, distributionTable[i % distributionTable.length]) % 13); long affiliate = Math.round(Math.pow(user * stars, distributionTable[i % distributionTable.length]) % 11); @@ -203,7 +204,7 @@ protected void createReviewsIndex() throws IOException { } protected void createReviewsIndex(String indexName) throws IOException { - createReviewsIndex(indexName, 1000, "date", false, 5, "affiliate_id"); + createReviewsIndex(indexName, 1000, 27, "date", false, 5, "affiliate_id"); } protected void 
createPivotReviewsTransform(String transformId, String transformIndex, String query) throws IOException { @@ -216,7 +217,7 @@ protected void createPivotReviewsTransform(String transformId, String transformI } protected void createReviewsIndexNano() throws IOException { - createReviewsIndex(REVIEWS_DATE_NANO_INDEX_NAME, 1000, "date_nanos", false, -1, null); + createReviewsIndex(REVIEWS_DATE_NANO_INDEX_NAME, 1000, 27, "date_nanos", false, -1, null); } protected void createContinuousPivotReviewsTransform(String transformId, String transformIndex, String authHeader) throws IOException { diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformTaskFailedStateIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformTaskFailedStateIT.java index 1f52c2ff9d311..9f74d445252d2 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformTaskFailedStateIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformTaskFailedStateIT.java @@ -57,7 +57,7 @@ public void cleanUpPotentiallyFailedTransform() throws Exception { public void testForceStopFailedTransform() throws Exception { String transformId = "test-force-stop-failed-transform"; - createReviewsIndex(REVIEWS_INDEX_NAME, 10, "date", false, -1, null); + createReviewsIndex(REVIEWS_INDEX_NAME, 10, 27, "date", false, -1, null); String transformIndex = "failure_pivot_reviews"; createDestinationIndexWithBadMapping(transformIndex); createContinuousPivotReviewsTransform(transformId, transformIndex, null); @@ -94,7 +94,7 @@ public void testForceStopFailedTransform() throws Exception { public void testStartFailedTransform() throws Exception { String transformId = "test-force-start-failed-transform"; - createReviewsIndex(REVIEWS_INDEX_NAME, 10, "date", false, -1, null); + createReviewsIndex(REVIEWS_INDEX_NAME, 10, 27, "date", false, -1, null); String transformIndex = "failure_pivot_reviews"; createDestinationIndexWithBadMapping(transformIndex); createContinuousPivotReviewsTransform(transformId, transformIndex, null); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java index 93517c14a2b54..5e6cdac09e2c2 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java @@ -149,7 +149,7 @@ public Tuple, Map> processSearchResponse( } CompositeAggregation compositeAgg = aggregations.get(COMPOSITE_AGGREGATION_NAME); - if (compositeAgg == null || compositeAgg.getBuckets().isEmpty()) { + if (compositeAgg == null || compositeAgg.afterKey() == null) { return null; } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java index aa2f85e5491f9..91d0c90134bfc 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java +++ 
b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java @@ -25,6 +25,8 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; import org.elasticsearch.xcontent.DeprecationHandler; @@ -34,12 +36,14 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.transform.TransformDeprecations; import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; +import org.elasticsearch.xpack.core.transform.transforms.SettingsConfigTests; import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; import org.elasticsearch.xpack.core.transform.transforms.pivot.AggregationConfig; import org.elasticsearch.xpack.core.transform.transforms.pivot.AggregationConfigTests; import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfig; import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfigTests; import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfigTests; import org.elasticsearch.xpack.spatial.SpatialPlugin; import org.elasticsearch.xpack.transform.Transform; import org.elasticsearch.xpack.transform.transforms.Function; @@ -51,6 +55,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -63,7 +68,11 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class PivotTests extends ESTestCase { @@ -218,6 +227,47 @@ public void testGetPerformanceCriticalFields() throws IOException { assertThat(pivot.getPerformanceCriticalFields(), contains("field-A", "field-B", "field-C")); } + public void testProcessSearchResponse() { + Function pivot = new Pivot( + PivotConfigTests.randomPivotConfig(), + SettingsConfigTests.randomSettingsConfig(), + Version.CURRENT, + Collections.emptySet() + ); + + Aggregations aggs = null; + assertThat(pivot.processSearchResponse(searchResponseFromAggs(aggs), null, null, null, null, null), is(nullValue())); + + aggs = new Aggregations(List.of()); + assertThat(pivot.processSearchResponse(searchResponseFromAggs(aggs), null, null, null, null, null), is(nullValue())); + + CompositeAggregation compositeAgg = mock(CompositeAggregation.class); + when(compositeAgg.getName()).thenReturn("_transform"); + when(compositeAgg.getBuckets()).thenReturn(List.of()); + when(compositeAgg.afterKey()).thenReturn(null); + aggs = new Aggregations(List.of(compositeAgg)); + assertThat(pivot.processSearchResponse(searchResponseFromAggs(aggs), null, null, null, null, null), is(nullValue())); + + when(compositeAgg.getBuckets()).thenReturn(List.of()); + when(compositeAgg.afterKey()).thenReturn(Map.of("key", "value")); + aggs = new Aggregations(List.of(compositeAgg)); + // Empty bucket list is *not* a stop 
condition for composite agg processing. + assertThat(pivot.processSearchResponse(searchResponseFromAggs(aggs), null, null, null, null, null), is(notNullValue())); + + CompositeAggregation.Bucket bucket = mock(CompositeAggregation.Bucket.class); + List buckets = List.of(bucket); + doReturn(buckets).when(compositeAgg).getBuckets(); + when(compositeAgg.afterKey()).thenReturn(null); + aggs = new Aggregations(List.of(compositeAgg)); + assertThat(pivot.processSearchResponse(searchResponseFromAggs(aggs), null, null, null, null, null), is(nullValue())); + } + + private static SearchResponse searchResponseFromAggs(Aggregations aggs) { + SearchResponseSections sections = new SearchResponseSections(null, aggs, null, false, null, null, 1); + SearchResponse searchResponse = new SearchResponse(sections, null, 10, 5, 0, 0, new ShardSearchFailure[0], null); + return searchResponse; + } + private class MyMockClient extends NoOpClient { MyMockClient(String testName) { super(testName); From c61881ce0e794e596c1465d47ca5f4cf01285309 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 25 Jan 2022 14:47:20 +0000 Subject: [PATCH 017/100] [ML] Update running process when global calendar changes (#83044) Adding events to global calendars did not update open jobs as the special _all job Id was not checked. --- docs/changelog/83044.yaml | 5 ++ .../xpack/core/ml/job/config/JobUpdate.java | 6 ++ .../ml/integration/ScheduledEventsIT.java | 60 ++++++++++++---- .../xpack/ml/job/JobManager.java | 69 ++++++++++--------- 4 files changed, 96 insertions(+), 44 deletions(-) create mode 100644 docs/changelog/83044.yaml diff --git a/docs/changelog/83044.yaml b/docs/changelog/83044.yaml new file mode 100644 index 0000000000000..4ba59ff68d073 --- /dev/null +++ b/docs/changelog/83044.yaml @@ -0,0 +1,5 @@ +pr: 83044 +summary: Update running process when global calendar changes +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 0ce7366d4a281..0152e234c4078 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.job.config; import org.elasticsearch.Version; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -395,6 +396,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @Override + public String toString() { + return Strings.toString(this::toXContent); + } + public Set getUpdateFields() { Set updateFields = new TreeSet<>(); if (groups != null) { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java index 0a69500e51d5e..d7778bf570430 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java @@ -33,6 +33,7 @@ 
import java.util.List; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -372,7 +373,10 @@ public void testAddOpenedJobToGroupWithCalendar() throws Exception { } /** - * An open job that later gets added to a calendar, should take the scheduled events into account + * Add a global calendar then create a job that will pick + * up the calendar. + * Add a new scheduled event to the calendar, the open + * job should pick up the new event */ public void testNewJobWithGlobalCalendar() throws Exception { String calendarId = "test-global-calendar"; @@ -381,28 +385,56 @@ public void testNewJobWithGlobalCalendar() throws Exception { putCalendar(calendarId, Collections.singletonList(Metadata.ALL), "testNewJobWithGlobalCalendar calendar"); long startTime = 1514764800000L; - final int bucketCount = 3; + final int bucketCount = 6; TimeValue bucketSpan = TimeValue.timeValueMinutes(30); // Put events in the calendar - List events = new ArrayList<>(); + List preOpenEvents = new ArrayList<>(); long eventStartTime = startTime; long eventEndTime = eventStartTime + (long) (1.5 * bucketSpan.millis()); - events.add( - new ScheduledEvent.Builder().description("Some Event") + preOpenEvents.add( + new ScheduledEvent.Builder().description("Pre open Event") .startTime((Instant.ofEpochMilli(eventStartTime))) .endTime((Instant.ofEpochMilli(eventEndTime))) .calendarId(calendarId) .build() ); - postScheduledEvents(calendarId, events); - - Job.Builder job = createJob("scheduled-events-add-to-new-job--with-global-calendar", bucketSpan); + postScheduledEvents(calendarId, preOpenEvents); // Open the job + Job.Builder job = createJob("scheduled-events-add-to-new-job--with-global-calendar", bucketSpan); openJob(job.getId()); + // Add another event after the job is opened + List postOpenJobEvents = new ArrayList<>(); + eventStartTime = eventEndTime + (3 * bucketSpan.millis()); + eventEndTime = eventStartTime + bucketSpan.millis(); + postOpenJobEvents.add( + new ScheduledEvent.Builder().description("Event added after job is opened") + .startTime((Instant.ofEpochMilli(eventStartTime))) + .endTime((Instant.ofEpochMilli(eventEndTime))) + .calendarId(calendarId) + .build() + ); + postScheduledEvents(calendarId, postOpenJobEvents); + + // Wait until the notification that the job was updated is indexed + assertBusy(() -> { + SearchResponse searchResponse = client().prepareSearch(NotificationsIndex.NOTIFICATIONS_INDEX) + .setSize(1) + .addSort("timestamp", SortOrder.DESC) + .setQuery( + QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("job_id", job.getId())) + .filter(QueryBuilders.termQuery("level", "info")) + ) + .get(); + SearchHit[] hits = searchResponse.getHits().getHits(); + assertThat(hits.length, equalTo(1)); + assertThat(hits[0].getSourceAsMap().get("message"), equalTo("Updated calendars in running process")); + }); + // write some buckets of data postData( job.getId(), @@ -416,12 +448,14 @@ public void testNewJobWithGlobalCalendar() throws Exception { GetBucketsAction.Request getBucketsRequest = new GetBucketsAction.Request(job.getId()); List buckets = getBuckets(getBucketsRequest); - // 1st and 2nd buckets have the event but the last one does not - assertEquals(1, buckets.get(0).getScheduledEvents().size()); - assertEquals("Some Event", buckets.get(0).getScheduledEvents().get(0)); - assertEquals(1, buckets.get(1).getScheduledEvents().size()); - 
assertEquals("Some Event", buckets.get(1).getScheduledEvents().get(0)); + // 1st and 2nd buckets have the first event + // 5th and 6th buckets have the second event + assertThat(buckets.get(0).getScheduledEvents(), contains("Pre open Event")); + assertThat(buckets.get(1).getScheduledEvents(), contains("Pre open Event")); assertEquals(0, buckets.get(2).getScheduledEvents().size()); + assertEquals(0, buckets.get(3).getScheduledEvents().size()); + assertThat(buckets.get(4).getScheduledEvents(), contains("Event added after job is opened")); + assertThat(buckets.get(5).getScheduledEvents(), contains("Event added after job is opened")); } private Job.Builder createJob(String jobId, TimeValue bucketSpan) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index be859dd02a211..ab63c05df7908 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -8,6 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; @@ -27,9 +28,6 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.MlConfigIndex; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -63,6 +61,7 @@ import org.elasticsearch.xpack.ml.utils.VoidChainTaskExecutor; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.List; @@ -426,28 +425,28 @@ public void deleteJob( private void postJobUpdate(UpdateJobAction.Request request, Job updatedJob, ActionListener actionListener) { // Autodetect must be updated if the fields that the C++ uses are changed - if (request.getJobUpdate().isAutodetectProcessUpdate()) { - JobUpdate jobUpdate = request.getJobUpdate(); + JobUpdate jobUpdate = request.getJobUpdate(); + if (jobUpdate.isAutodetectProcessUpdate()) { if (isJobOpen(clusterService.state(), request.getJobId())) { updateJobProcessNotifier.submitJobUpdate(UpdateParams.fromJobUpdate(jobUpdate), ActionListener.wrap(isUpdated -> { if (isUpdated) { auditJobUpdatedIfNotInternal(request); + } else { + logger.error("[{}] Updating autodetect failed for job update [{}]", jobUpdate.getJobId(), jobUpdate); } }, e -> { - // No need to do anything + logger.error( + new ParameterizedMessage( + "[{}] Updating autodetect failed with an exception, job update [{}] ", + jobUpdate.getJobId(), + jobUpdate + ), + e + ); })); } } else { - logger.debug("[{}] No process update required for job update: {}", request::getJobId, () -> { - try { - XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); - request.getJobUpdate().toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); - return Strings.toString(jsonBuilder); - } catch (IOException e) { - return "(unprintable due to " + e.getMessage() + ")"; - } - }); - + logger.debug("[{}] No process update required for job 
update: {}", jobUpdate::getJobId, jobUpdate::toString); auditJobUpdatedIfNotInternal(request); } @@ -610,32 +609,40 @@ public void updateProcessOnCalendarChanged(List calendarJobIds, ActionLi return; } + boolean appliesToAllJobs = calendarJobIds.stream().anyMatch(Metadata.ALL::equals); + if (appliesToAllJobs) { + submitJobEventUpdate(openJobIds, updateListener); + return; + } + // calendarJobIds may be a group or job jobConfigProvider.expandGroupIds( calendarJobIds, ActionListener.wrap(expandedIds -> threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { - // Merge the expended group members with the request Ids. + // Merge the expanded group members with the request Ids. // Ids that aren't jobs will be filtered by isJobOpen() expandedIds.addAll(calendarJobIds); - for (String jobId : expandedIds) { - if (isJobOpen(clusterState, jobId)) { - updateJobProcessNotifier.submitJobUpdate( - UpdateParams.scheduledEventsUpdate(jobId), - ActionListener.wrap(isUpdated -> { - if (isUpdated) { - auditor.info(jobId, Messages.getMessage(Messages.JOB_AUDIT_CALENDARS_UPDATED_ON_PROCESS)); - } - }, e -> logger.error("[" + jobId + "] failed submitting process update on calendar change", e)) - ); - } - } - - updateListener.onResponse(Boolean.TRUE); + openJobIds.retainAll(expandedIds); + submitJobEventUpdate(openJobIds, updateListener); }), updateListener::onFailure) ); } + private void submitJobEventUpdate(Collection jobIds, ActionListener updateListener) { + for (String jobId : jobIds) { + updateJobProcessNotifier.submitJobUpdate( + UpdateParams.scheduledEventsUpdate(jobId), + ActionListener.wrap( + isUpdated -> { auditor.info(jobId, Messages.getMessage(Messages.JOB_AUDIT_CALENDARS_UPDATED_ON_PROCESS)); }, + e -> logger.error("[" + jobId + "] failed submitting process update on calendar change", e) + ) + ); + } + + updateListener.onResponse(Boolean.TRUE); + } + public void revertSnapshot( RevertModelSnapshotAction.Request request, ActionListener actionListener, From 04d520c212dbb0eed2f7dd6ded83f0462d72c4d3 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 25 Jan 2022 15:14:48 +0000 Subject: [PATCH 018/100] [ML][DOCS] Add Trained model APIs to the REST APIs index (#82791) --- docs/reference/rest-api/index.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 2426234565e36..1a8569b42d3b2 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -32,6 +32,7 @@ not be included yet. * <> * <> * <> +* <> * <> * <> * <> From b41397111c9dd86b6e1f115dd71b386be30302b4 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 25 Jan 2022 10:17:33 -0500 Subject: [PATCH 019/100] [DOCS] Rename example stored script (#83054) Changes: * Renames the example stored script to avoid naming collisions with the [stored script API docs](https://www.elastic.co/guide/en/elasticsearch/reference/master/create-stored-script-api.html). * Adds a hidden snippet to delete the script for cleanup. 
Relates to https://github.com/elastic/elasticsearch/issues/83038 --- docs/reference/ingest.asciidoc | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index fa4308ba9a155..4c4b1b5872f16 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -848,7 +848,7 @@ You can also specify a <> as the [source,console] ---- -PUT _scripts/my-stored-script +PUT _scripts/my-prod-tag-script { "script": { "lang": "painless", @@ -872,13 +872,21 @@ PUT _ingest/pipeline/my-pipeline { "drop": { "description": "Drop documents that don't contain 'prod' tag", - "if": { "id": "my-stored-script" } + "if": { "id": "my-prod-tag-script" } } } ] } ---- +//// +[source,console] +---- +DELETE _scripts/my-prod-tag-script +---- +// TEST[continued] +//// + Incoming documents often contain object fields. If a processor script attempts to access a field whose parent object does not exist, {es} returns a `NullPointerException`. To avoid these exceptions, use From b552d5cb0e475ade163d0a6b4d43feb940007dec Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 25 Jan 2022 10:27:10 -0500 Subject: [PATCH 020/100] [DOCS] Re-add network traffic para to `term` query (#83047) Re-adds a paragraph about minimizing network traffic for a terms lookup. This paragraph was erroneously removed as part of https://github.com/elastic/elasticsearch/pull/42889. --- docs/reference/query-dsl/terms-query.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/reference/query-dsl/terms-query.asciidoc b/docs/reference/query-dsl/terms-query.asciidoc index 59a7825f8a4ea..5d37bf0141a3a 100644 --- a/docs/reference/query-dsl/terms-query.asciidoc +++ b/docs/reference/query-dsl/terms-query.asciidoc @@ -87,10 +87,16 @@ By default, {es} limits the `terms` query to a maximum of 65,536 terms. This includes terms fetched using terms lookup. You can change this limit using the <> setting. +To reduce network traffic, a terms lookup will fetch the document's values from +a shard on a local data node if possible. If your terms data is not large, +consider using an index with a single primary shard that's fully replicated +across all applicable data nodes to minimize network traffic. + To perform a terms lookup, use the following parameters. [[query-dsl-terms-lookup-params]] ====== Terms lookup parameters + `index`:: (Required, string) Name of the index from which to fetch field values. From 3845a418043d9bffd4ff26d037656eb956ec3345 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 25 Jan 2022 10:30:48 -0500 Subject: [PATCH 021/100] [DOCS] Fix stored script example snippet (#83056) Changes: * Updates the example Painless script to be valid and aligns it with the example in [How to write a script](https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting-using.html#script-stored-scripts). * Adds hidden snippets to delete the script for cleanup.
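A usage sketch to go with this fix, assuming the low-level Java REST client; the index name and modifier value are made up, and the script's Painless source, `Math.log(_score * 2) + params['my_modifier']`, appears in the diff that follows. Referencing the stored script by id and passing `params` lets the compiled script be reused across searches.

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class StoredScriptScoring {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Score documents with the stored script, supplying its parameter.
            Request search = new Request("GET", "/my-index/_search");
            search.setJsonEntity(
                "{\"query\":{\"script_score\":{\"query\":{\"match_all\":{}},"
                    + "\"script\":{\"id\":\"my-stored-script\",\"params\":{\"my_modifier\":2}}}}}"
            );
            Response response = client.performRequest(search);
            System.out.println(response.getStatusLine());
        }
    }
}
----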
Relates to https://github.com/elastic/elasticsearch/issues/83038 --- .../apis/create-stored-script-api.asciidoc | 13 +++++++++---- .../apis/delete-stored-script-api.asciidoc | 5 +---- .../scripting/apis/get-stored-script-api.asciidoc | 13 +++++++++---- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/docs/reference/scripting/apis/create-stored-script-api.asciidoc b/docs/reference/scripting/apis/create-stored-script-api.asciidoc index a53472ce13ba9..6108831a836b1 100644 --- a/docs/reference/scripting/apis/create-stored-script-api.asciidoc +++ b/docs/reference/scripting/apis/create-stored-script-api.asciidoc @@ -13,14 +13,19 @@ PUT _scripts/my-stored-script { "script": { "lang": "painless", - "source": """ - TimestampHour date = doc['@timestamp'].value; - return date.getHour() - """ + "source": "Math.log(_score * 2) + params['my_modifier']" } } ---- +//// +[source,console] +---- +DELETE _scripts/my-stored-script +---- +// TEST[continued] +//// + [[create-stored-script-api-request]] ==== {api-request-title} diff --git a/docs/reference/scripting/apis/delete-stored-script-api.asciidoc b/docs/reference/scripting/apis/delete-stored-script-api.asciidoc index 7d74a7af26d5a..038d1916f76fd 100644 --- a/docs/reference/scripting/apis/delete-stored-script-api.asciidoc +++ b/docs/reference/scripting/apis/delete-stored-script-api.asciidoc @@ -14,10 +14,7 @@ PUT _scripts/my-stored-script { "script": { "lang": "painless", - "source": """ - TimestampHour date = doc['@timestamp'].value; - return date.getHour() - """ + "source": "Math.log(_score * 2) + params['my_modifier']" } } ---- diff --git a/docs/reference/scripting/apis/get-stored-script-api.asciidoc b/docs/reference/scripting/apis/get-stored-script-api.asciidoc index f7f974a1f5f1d..6b6f6648a7ed6 100644 --- a/docs/reference/scripting/apis/get-stored-script-api.asciidoc +++ b/docs/reference/scripting/apis/get-stored-script-api.asciidoc @@ -14,10 +14,7 @@ PUT _scripts/my-stored-script { "script": { "lang": "painless", - "source": """ - TimestampHour date = doc['@timestamp'].value; - return date.getHour() - """ + "source": "Math.log(_score * 2) + params['my_modifier']" } } ---- @@ -29,6 +26,14 @@ GET _scripts/my-stored-script ---- // TEST[continued] +//// +[source,console] +---- +DELETE _scripts/my-stored-script +---- +// TEST[continued] +//// + [[get-stored-script-api-request]] ==== {api-request-title} From b07852317c1c29d68d805ca6b2500984960f289d Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Tue, 25 Jan 2022 16:49:18 +0100 Subject: [PATCH 022/100] Stack Monitoring: Add Enterprise Search monitoring index templates (#82743) --- .../resources/monitoring-ent-search-mb.json | 721 ++++++++++++++++++ .../MonitoringTemplateRegistry.java | 15 +- .../http/HttpExporterResourceTests.java | 2 +- 3 files changed, 736 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/core/src/main/resources/monitoring-ent-search-mb.json diff --git a/x-pack/plugin/core/src/main/resources/monitoring-ent-search-mb.json b/x-pack/plugin/core/src/main/resources/monitoring-ent-search-mb.json new file mode 100644 index 0000000000000..fac2a026c405c --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/monitoring-ent-search-mb.json @@ -0,0 +1,721 @@ +{ + "index_patterns": [".monitoring-ent-search-${xpack.stack.monitoring.template.version}-*"], + "version": ${xpack.stack.monitoring.template.release.version}, + "_meta": { + "description": "Template used by Enterprise Search Metricbeat module monitoring 
information for Stack Monitoring", + "managed": true + }, + "template": { + "mappings": { + "properties": { + "enterprisesearch": { + "properties": { + "cluster_uuid": { + "type": "keyword", + "ignore_above": 1024 + }, + "health": { + "properties": { + "crawler": { + "properties": { + "workers": { + "properties": { + "active": { + "type": "long" + }, + "available": { + "type": "long" + }, + "pool_size": { + "type": "long" + } + } + } + } + }, + "jvm": { + "properties": { + "gc": { + "properties": { + "collection_count": { + "type": "long" + }, + "collection_time": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "memory_usage": { + "properties": { + "heap_committed": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "heap_init": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "heap_max": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "heap_used": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "non_heap_committed": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "non_heap_init": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "object_pending_finalization_count": { + "type": "long" + } + } + }, + "threads": { + "properties": { + "current": { + "type": "long" + }, + "daemon": { + "type": "long" + }, + "max": { + "type": "long" + }, + "total_started": { + "type": "long" + } + } + }, + "version": { + "type": "keyword", + "ignore_above": 1024 + } + } + }, + "name": { + "type": "keyword", + "ignore_above": 1024 + }, + "process": { + "properties": { + "filebeat": { + "properties": { + "pid": { + "type": "long" + }, + "restart_count": { + "type": "long" + }, + "time_since_last_restart": { + "properties": { + "sec": { + "type": "long" + } + } + } + } + }, + "pid": { + "type": "long" + }, + "uptime": { + "properties": { + "sec": { + "type": "long" + } + } + } + } + }, + "version": { + "properties": { + "build_hash": { + "type": "keyword", + "ignore_above": 1024 + }, + "number": { + "type": "keyword", + "ignore_above": 1024 + } + } + } + } + }, + "stats": { + "properties": { + "connectors": { + "properties": { + "job_store": { + "properties": { + "job_types": { + "properties": { + "delete": { + "type": "long" + }, + "full": { + "type": "long" + }, + "incremental": { + "type": "long" + }, + "permissions": { + "type": "long" + } + } + }, + "waiting": { + "type": "long" + }, + "working": { + "type": "long" + } + } + }, + "pool": { + "properties": { + "extract_worker_pool": { + "properties": { + "busy": { + "type": "long" + }, + "idle": { + "type": "long" + }, + "queue_depth": { + "type": "long" + }, + "size": { + "type": "long" + }, + "total_completed": { + "type": "long" + }, + "total_scheduled": { + "type": "long" + } + } + }, + "publish_worker_pool": { + "properties": { + "busy": { + "type": "long" + }, + "idle": { + "type": "long" + }, + "queue_depth": { + "type": "long" + }, + "size": { + "type": "long" + }, + "total_completed": { + "type": "long" + }, + "total_scheduled": { + "type": "long" + } + } + }, + "subextract_worker_pool": { + "properties": { + "busy": { + "type": "long" + }, + "idle": { + "type": "long" + }, + "queue_depth": { + "type": "long" + }, + "size": { + "type": "long" + }, + "total_completed": { + "type": "long" + }, + "total_scheduled": { + "type": "long" + } + } + } + } + } + } + }, + "http": { + "properties": { + "connections": { + "properties": { + "current": { + "type": "long" + }, + "max": { + "type": "long" + }, + "total": { + "type": "long" + } + } + 
}, + "network": { + "properties": { + "received": { + "properties": { + "bytes": { + "type": "long" + }, + "bytes_per_sec": { + "type": "long" + } + } + }, + "sent": { + "properties": { + "bytes": { + "type": "long" + }, + "bytes_per_sec": { + "type": "long" + } + } + } + } + }, + "request_duration": { + "properties": { + "max": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "mean": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "std_dev": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "responses": { + "properties": { + "1xx": { + "type": "long" + }, + "2xx": { + "type": "long" + }, + "3xx": { + "type": "long" + }, + "4xx": { + "type": "long" + }, + "5xx": { + "type": "long" + } + } + } + } + }, + "product_usage": { + "properties": { + "app_search": { + "properties": { + "total_engines": { + "type": "long" + } + } + }, + "workplace_search": { + "properties": { + "total_org_sources": { + "type": "long" + }, + "total_private_sources": { + "type": "long" + } + } + } + } + }, + "queues": { + "properties": { + "engine_destroyer": { + "properties": { + "count": { + "type": "long" + } + } + }, + "failed": { + "properties": { + "count": { + "type": "long" + } + } + }, + "mailer": { + "properties": { + "count": { + "type": "long" + } + } + }, + "process_crawl": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "agent": { + "properties": { + "id": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + "ephemeral_id": { + "type": "keyword" + }, + "type": { + "type": "keyword" + } + } + }, + "process": { + "properties": { + "pid": { + "type": "long" + } + } + }, + "@timestamp": { + "type": "date" + }, + "ecs": { + "properties": { + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "event": { + "properties": { + "action": { + "type": "keyword", + "ignore_above": 1024 + }, + "agent_id_status": { + "type": "keyword", + "ignore_above": 1024 + }, + "category": { + "type": "keyword", + "ignore_above": 1024 + }, + "code": { + "type": "keyword", + "ignore_above": 1024 + }, + "created": { + "type": "date" + }, + "dataset": { + "type": "keyword", + "ignore_above": 1024 + }, + "duration": { + "type": "long" + }, + "end": { + "type": "date" + }, + "hash": { + "type": "keyword", + "ignore_above": 1024 + }, + "id": { + "type": "keyword", + "ignore_above": 1024 + }, + "ingested": { + "type": "date" + }, + "kind": { + "type": "keyword", + "ignore_above": 1024 + }, + "module": { + "type": "keyword", + "ignore_above": 1024 + }, + "original": { + "type": "keyword", + "index": false, + "doc_values": false, + "ignore_above": 1024 + }, + "outcome": { + "type": "keyword", + "ignore_above": 1024 + }, + "provider": { + "type": "keyword", + "ignore_above": 1024 + }, + "reason": { + "type": "keyword", + "ignore_above": 1024 + }, + "reference": { + "type": "keyword", + "ignore_above": 1024 + }, + "risk_score": { + "type": "float" + }, + "risk_score_norm": { + "type": "float" + }, + "sequence": { + "type": "long" + }, + "severity": { + "type": "long" + }, + "start": { + "type": "date" + }, + "timezone": { + "type": "keyword", + "ignore_above": 1024 + }, + "type": { + "type": "keyword", + "ignore_above": 1024 + }, + "url": { + "type": "keyword", + "ignore_above": 1024 + } + } + }, + "metricset": { + "properties": { + "name": { + "type": "keyword", + "ignore_above": 1024 + }, + "period": { + "type": "long" + } + } + }, + "service": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": 
"keyword" + }, + "address": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + }, + "environment": { + "type": "keyword", + "ignore_above": 1024 + }, + "ephemeral_id": { + "type": "keyword", + "ignore_above": 1024 + }, + "node": { + "properties": { + "name": { + "type": "keyword", + "ignore_above": 1024 + } + } + }, + "origin": { + "properties": { + "address": { + "type": "keyword", + "ignore_above": 1024 + }, + "environment": { + "type": "keyword", + "ignore_above": 1024 + }, + "ephemeral_id": { + "type": "keyword", + "ignore_above": 1024 + }, + "id": { + "type": "keyword", + "ignore_above": 1024 + }, + "name": { + "type": "keyword", + "ignore_above": 1024 + }, + "node": { + "properties": { + "name": { + "type": "keyword", + "ignore_above": 1024 + } + } + }, + "state": { + "type": "keyword", + "ignore_above": 1024 + }, + "type": { + "type": "keyword", + "ignore_above": 1024 + }, + "version": { + "type": "keyword", + "ignore_above": 1024 + } + } + }, + "state": { + "type": "keyword", + "ignore_above": 1024 + }, + "target": { + "properties": { + "address": { + "type": "keyword", + "ignore_above": 1024 + }, + "environment": { + "type": "keyword", + "ignore_above": 1024 + }, + "ephemeral_id": { + "type": "keyword", + "ignore_above": 1024 + }, + "id": { + "type": "keyword", + "ignore_above": 1024 + }, + "name": { + "type": "keyword", + "ignore_above": 1024 + }, + "node": { + "properties": { + "name": { + "type": "keyword", + "ignore_above": 1024 + } + } + }, + "state": { + "type": "keyword", + "ignore_above": 1024 + }, + "type": { + "type": "keyword", + "ignore_above": 1024 + }, + "version": { + "type": "keyword", + "ignore_above": 1024 + } + } + } + } + }, + "host": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "type": "keyword", + "ignore_above": 1024 + }, + "architecture": { + "type": "keyword", + "ignore_above": 1024 + } + } + } + } + }, + "settings": { + "index.mapping.total_fields.limit": 2000 + } + }, + "data_stream": {} +} diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java index fe54eac3bebe2..bea869222c38e 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @@ -207,6 +207,18 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry { STACK_TEMPLATE_VARIABLES ); + ////////////////////////////////////////////////////////// + // Enterprise Search metricbeat template (for matching ".monitoring-ent-search-8-*" indices) + ////////////////////////////////////////////////////////// + public static final String ENTERPRISE_SEARCH_STACK_INDEX_TEMPLATE_NAME = ".monitoring-ent-search-mb"; + public static final IndexTemplateConfig ENTERPRISE_SEARCH_STACK_INDEX_TEMPLATE = new IndexTemplateConfig( + ENTERPRISE_SEARCH_STACK_INDEX_TEMPLATE_NAME, + "/monitoring-ent-search-mb.json", + STACK_MONITORING_REGISTRY_VERSION, + STACK_MONITORING_REGISTRY_VERSION_VARIABLE, + STACK_TEMPLATE_VARIABLES + ); + public static final String[] TEMPLATE_NAMES = new 
String[] { ALERTS_INDEX_TEMPLATE_NAME, BEATS_INDEX_TEMPLATE_NAME, @@ -296,7 +308,8 @@ protected List getLegacyTemplateConfigs() { BEATS_STACK_INDEX_TEMPLATE, ES_STACK_INDEX_TEMPLATE, KIBANA_STACK_INDEX_TEMPLATE, - LOGSTASH_STACK_INDEX_TEMPLATE + LOGSTASH_STACK_INDEX_TEMPLATE, + ENTERPRISE_SEARCH_STACK_INDEX_TEMPLATE ); @Override diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterResourceTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterResourceTests.java index b09d8fb984fb4..b255bf9a7758c 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterResourceTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterResourceTests.java @@ -66,7 +66,7 @@ public class HttpExporterResourceTests extends AbstractPublishableHttpResourceTe private final boolean validLicense = randomBoolean(); /** - * kibana, logstash, and beats + * kibana, logstash, beats and enterprise search */ private final int EXPECTED_TEMPLATES = TEMPLATE_NAMES.length; private final int EXPECTED_WATCHES = ClusterAlertsUtil.WATCH_IDS.length; From 84429e6ff25fc6739892284e354f904ee504240c Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 25 Jan 2022 11:03:28 -0500 Subject: [PATCH 023/100] [DOCS] Fix typo in `action.destructive_requires_name` breaking change (#83085) In 8.0+, the `action.destructive_requires_name` setting defaults to `true`, not `false`. --- .../migration/migrate_8_0/cluster-node-setting-changes.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc b/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc index b1920b0f0c856..1889290278f11 100644 --- a/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc +++ b/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc @@ -8,7 +8,7 @@ //tag::notable-breaking-changes[] TIP: {ess-setting-change} -.`action.destructive_requires_name` now defaults to `false`. {ess-icon} +.`action.destructive_requires_name` now defaults to `true`. {ess-icon} [%collapsible] ==== *Details* + From 38daa228474acf3a9ed26ac017dcee0671cbe86e Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Tue, 25 Jan 2022 17:05:19 +0000 Subject: [PATCH 024/100] Adjust changelog categories after reorg (#83087) The Core/Features team is now Data Management, so change the allowed changelog areas to match. 
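Looping back to the monitoring-template change in [PATCH 022/100] above, the registration pattern it follows reduces to the sketch below. `TemplateConfig` is a hypothetical stand-in that only mirrors the constructor arguments visible in the patch (the shared variables map is omitted), and the version constant is an assumed value.

[source,java]
----
import java.util.List;

// Hypothetical stand-in mirroring the constructor arguments used in the patch.
record TemplateConfig(String name, String resource, int version, String versionVariable) {}

class RegistrySketch {
    static final int STACK_MONITORING_REGISTRY_VERSION = 8; // assumed value

    static final TemplateConfig ENT_SEARCH = new TemplateConfig(
        ".monitoring-ent-search-mb",              // matches ".monitoring-ent-search-8-*" indices
        "/monitoring-ent-search-mb.json",         // the mapping resource added above
        STACK_MONITORING_REGISTRY_VERSION,
        "xpack.stack.monitoring.template.version" // assumed to fill the JSON's placeholders
    );

    // Registering a template amounts to returning its config from this list;
    // the exporter test sizes its expectations off TEMPLATE_NAMES.length, which
    // is why its comment gained "and enterprise search".
    static List<TemplateConfig> legacyTemplateConfigs() {
        return List.of(ENT_SEARCH /* , beats, es, kibana, logstash templates */);
    }
}
----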
--- .../src/main/resources/changelog-schema.json | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/build-tools-internal/src/main/resources/changelog-schema.json b/build-tools-internal/src/main/resources/changelog-schema.json index e96e014fa19e3..c58107caa3cdf 100644 --- a/build-tools-internal/src/main/resources/changelog-schema.json +++ b/build-tools-internal/src/main/resources/changelog-schema.json @@ -34,17 +34,17 @@ "EQL", "Engine", "FIPS", - "Features/CAT APIs", - "Features/Data streams", - "Features/Features", - "Features/ILM+SLM", - "Features/Indices APIs", - "Features/Ingest", - "Features/Java High Level REST Client", - "Features/Java Low Level REST Client", - "Features/Monitoring", - "Features/Stats", - "Features/Watcher", + "Data Management/CAT APIs", + "Data Management/Data streams", + "Data Management/Features", + "Data Management/ILM+SLM", + "Data Management/Indices APIs", + "Data Management/Ingest", + "Data Management/Java High Level REST Client", + "Data Management/Java Low Level REST Client", + "Data Management/Monitoring", + "Data Management/Stats", + "Data Management/Watcher", "Geo", "Graph", "Highlighting", From 7f81877ce0ca6b2995bdb988b73609fdee0f82d6 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Tue, 25 Jan 2022 17:30:49 +0000 Subject: [PATCH 025/100] Remove unnecessary CopyOnWriteHashMap class (#83040) This is only used in one class, and can easily be replaced by standard Java maps. --- .../common/collect/CopyOnWriteHashMap.java | 581 ------------------ .../collect/CopyOnWriteHashMapTests.java | 142 ----- .../ccr/action/AutoFollowCoordinator.java | 13 +- 3 files changed, 8 insertions(+), 728 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java delete mode 100644 server/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java diff --git a/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java b/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java deleted file mode 100644 index 1ea52783c7762..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java +++ /dev/null @@ -1,581 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.common.collect; - -import org.apache.lucene.util.mutable.MutableValueInt; - -import java.lang.reflect.Array; -import java.util.AbstractMap; -import java.util.AbstractSet; -import java.util.ArrayDeque; -import java.util.Arrays; -import java.util.Collection; -import java.util.Deque; -import java.util.Iterator; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Set; -import java.util.stream.Stream; - -/** - * An immutable map whose writes result in a new copy of the map to be created. - * - * This is essentially a hash array mapped trie: inner nodes use a bitmap in - * order to map hashes to slots by counting ones. In case of a collision (two - * values having the same 32-bits hash), a leaf node is created which stores - * and searches for values sequentially. - * - * Reads and writes both perform in logarithmic time. Null keys and values are - * not supported. 
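// Aside: the contract just described, in use (a hypothetical snippet, not part
// of this patch). Writes return a new map and never mutate the receiver:
//
//     CopyOnWriteHashMap<String, Integer> m1 = new CopyOnWriteHashMap<>();
//     CopyOnWriteHashMap<String, Integer> m2 = m1.copyAndPut("a", 1);
//     m1.containsKey("a"); // false - m1 is untouched
//     m2.get("a");         // 1
//
// The AutoFollowCoordinator change later in this same patch gets the equivalent
// effect from standard collections - copy, mutate the copy, publish read-only:
//
//     Map<String, AutoFollower> updated = new HashMap<>(currentAutoFollowers);
//     updated.putAll(newAutoFollowers);
//     removedRemoteClusters.forEach(updated.keySet()::remove);
//     this.autoFollowers = Collections.unmodifiableMap(updated);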
- * - * This structure might need to perform several object creations per write so - * it is better suited for work-loads that are not too write-intensive. - * - * @see the wikipedia page - */ -public final class CopyOnWriteHashMap extends AbstractMap { - - private static final int TOTAL_HASH_BITS = 32; - private static final Object[] EMPTY_ARRAY = new Object[0]; - - private static final int HASH_BITS = 6; - private static final int HASH_MASK = 0x3F; - - /** - * Return a copy of the provided map. - */ - public static CopyOnWriteHashMap copyOf(Map map) { - if (map instanceof CopyOnWriteHashMap) { - // no need to copy in that case - @SuppressWarnings("unchecked") - final CopyOnWriteHashMap cowMap = (CopyOnWriteHashMap) map; - return cowMap; - } else { - return new CopyOnWriteHashMap().copyAndPutAll(map); - } - } - - /** - * Abstraction of a node, implemented by both inner and leaf nodes. - */ - private abstract static class Node { - - /** - * Recursively get the key with the given hash. - */ - abstract V get(Object key, int hash); - - /** - * Recursively add a new entry to this node. hashBits is - * the number of bits that are still set in the hash. When this value - * reaches a number that is less than or equal to {@code 0}, a leaf - * node needs to be created since it means that a collision occurred - * on the 32 bits of the hash. - */ - abstract Node put(K key, int hash, int hashBits, V value, MutableValueInt newValue); - - /** - * Recursively remove an entry from this node. - */ - abstract Node remove(Object key, int hash); - - /** - * For the current node only, append entries that are stored on this - * node to entries and sub nodes to nodes. - */ - abstract void visit(Deque> entries, Deque> nodes); - - /** - * Whether this node stores nothing under it. - */ - abstract boolean isEmpty(); - - } - - /** - * A leaf of the tree where all hashes are equal. Values are added and retrieved in linear time. 
- */ - private static class Leaf extends Node { - - private final K[] keys; - private final V[] values; - - Leaf(K[] keys, V[] values) { - this.keys = keys; - this.values = values; - } - - @SuppressWarnings("unchecked") - Leaf() { - this((K[]) EMPTY_ARRAY, (V[]) EMPTY_ARRAY); - } - - @Override - boolean isEmpty() { - return keys.length == 0; - } - - @Override - void visit(Deque> entries, Deque> nodes) { - for (int i = 0; i < keys.length; ++i) { - entries.add(new AbstractMap.SimpleImmutableEntry<>(keys[i], values[i])); - } - } - - @Override - V get(Object key, int hash) { - for (int i = 0; i < keys.length; i++) { - if (key.equals(keys[i])) { - return values[i]; - } - } - return null; - - } - - private static T[] replace(T[] array, int index, T value) { - final T[] copy = Arrays.copyOf(array, array.length); - copy[index] = value; - return copy; - } - - @Override - Leaf put(K key, int hash, int hashBits, V value, MutableValueInt newValue) { - assert hashBits <= 0 : hashBits; - int slot = -1; - for (int i = 0; i < keys.length; i++) { - if (key.equals(keys[i])) { - slot = i; - break; - } - } - - final K[] keys2; - final V[] values2; - - if (slot < 0) { - keys2 = appendElement(keys, key); - values2 = appendElement(values, value); - newValue.value = 1; - } else { - keys2 = replace(keys, slot, key); - values2 = replace(values, slot, value); - } - - return new Leaf<>(keys2, values2); - } - - @Override - Leaf remove(Object key, int hash) { - int slot = -1; - for (int i = 0; i < keys.length; i++) { - if (key.equals(keys[i])) { - slot = i; - break; - } - } - if (slot < 0) { - return this; - } - final K[] keys2 = removeArrayElement(keys, slot); - final V[] values2 = removeArrayElement(values, slot); - return new Leaf<>(keys2, values2); - } - } - - private static T[] removeArrayElement(T[] array, int index) { - @SuppressWarnings("unchecked") - final T[] result = (T[]) Array.newInstance(array.getClass().getComponentType(), array.length - 1); - System.arraycopy(array, 0, result, 0, index); - if (index < array.length - 1) { - System.arraycopy(array, index + 1, result, index, array.length - index - 1); - } - - return result; - } - - public static T[] appendElement(final T[] array, final T element) { - final T[] newArray = Arrays.copyOf(array, array.length + 1); - newArray[newArray.length - 1] = element; - return newArray; - } - - public static T[] insertElement(final T[] array, final T element, final int index) { - final T[] result = Arrays.copyOf(array, array.length + 1); - System.arraycopy(array, 0, result, 0, index); - result[index] = element; - if (index < array.length) { - System.arraycopy(array, index, result, index + 1, array.length - index); - } - return result; - } - - /** - * An inner node in this trie. Inner nodes store up to 64 key-value pairs - * and use a bitmap in order to associate hashes to them. For example, if - * an inner node contains 5 values, then 5 bits will be set in the bitmap - * and the ordinal of the bit set in this bit map will be the slot number. - * - * As a consequence, the number of slots in an inner node is equal to the - * number of one bits in the bitmap. 
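// Aside: a worked example of the bitmap-to-slot mapping described above, for a
// 6-bit hash h (values chosen for illustration):
//
//     long mask = 0b101001L;                            // entries at h = 0, 3 and 5
//     int h = 5;
//     boolean exists = (mask & (1L << h)) != 0;         // true
//     int slot = Long.bitCount(mask & ((1L << h) - 1)); // 2: bits 0 and 3 sit below bit 5
//
// Five stored values mean five set bits, and counting the one bits to the right
// of bit h yields a dense array index - exactly what the exists(int) and
// slot(int) methods below compute.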
- */ - private static class InnerNode extends Node { - - private final long mask; // the bitmap - private final K[] keys; - final Object[] subNodes; // subNodes[slot] is either a value or a sub node in case of a hash collision - - InnerNode(long mask, K[] keys, Object[] subNodes) { - this.mask = mask; - this.keys = keys; - this.subNodes = subNodes; - assert consistent(); - } - - // only used in assert - private boolean consistent() { - assert Long.bitCount(mask) == keys.length; - assert Long.bitCount(mask) == subNodes.length; - for (int i = 0; i < keys.length; ++i) { - if (subNodes[i] instanceof Node) { - assert keys[i] == null; - } else { - assert keys[i] != null; - } - } - return true; - } - - @Override - boolean isEmpty() { - return mask == 0; - } - - @SuppressWarnings("unchecked") - InnerNode() { - this(0, (K[]) EMPTY_ARRAY, EMPTY_ARRAY); - } - - @Override - void visit(Deque> entries, Deque> nodes) { - for (int i = 0; i < keys.length; ++i) { - final Object sub = subNodes[i]; - if (sub instanceof Node) { - @SuppressWarnings("unchecked") - final Node subNode = (Node) sub; - assert keys[i] == null; - nodes.add(subNode); - } else { - @SuppressWarnings("unchecked") - final V value = (V) sub; - entries.add(new AbstractMap.SimpleImmutableEntry<>(keys[i], value)); - } - } - } - - /** - * For a given hash on 6 bits, its value is set if the bitmap has a one - * at the corresponding index. - */ - private boolean exists(int hash6) { - return (mask & (1L << hash6)) != 0; - } - - /** - * For a given hash on 6 bits, the slot number is the number of one - * bits on the right of the hash6-th bit. - */ - private int slot(int hash6) { - return Long.bitCount(mask & ((1L << hash6) - 1)); - } - - @Override - V get(Object key, int hash) { - final int hash6 = hash & HASH_MASK; - if (exists(hash6) == false) { - return null; - } - final int slot = slot(hash6); - final Object sub = subNodes[slot]; - assert sub != null; - if (sub instanceof Node) { - assert keys[slot] == null; // keys don't make sense on inner nodes - @SuppressWarnings("unchecked") - final Node subNode = (Node) sub; - return subNode.get(key, hash >>> HASH_BITS); - } else { - if (keys[slot].equals(key)) { - @SuppressWarnings("unchecked") - final V v = (V) sub; - return v; - } else { - // we have an entry for this hash, but the value is different - return null; - } - } - } - - private Node newSubNode(int hashBits) { - if (hashBits <= 0) { - return new Leaf(); - } else { - return new InnerNode(); - } - } - - @SuppressWarnings("unchecked") - private InnerNode putExisting(K key, int hash, int hashBits, int slot, V value, MutableValueInt newValue) { - final K[] keys2 = Arrays.copyOf(keys, keys.length); - final Object[] subNodes2 = Arrays.copyOf(subNodes, subNodes.length); - - final Object previousValue = subNodes2[slot]; - if (previousValue instanceof Node) { - // insert recursively - assert keys[slot] == null; - subNodes2[slot] = ((Node) previousValue).put(key, hash, hashBits, value, newValue); - } else if (keys[slot].equals(key)) { - // replace the existing entry - subNodes2[slot] = value; - } else { - // hash collision - final K previousKey = keys[slot]; - final int previousHash = previousKey.hashCode() >>> (TOTAL_HASH_BITS - hashBits); - Node subNode = newSubNode(hashBits); - subNode = subNode.put(previousKey, previousHash, hashBits, (V) previousValue, newValue); - subNode = subNode.put(key, hash, hashBits, value, newValue); - keys2[slot] = null; - subNodes2[slot] = subNode; - } - return new InnerNode<>(mask, keys2, subNodes2); - } - - private 
InnerNode putNew(K key, int hash6, int slot, V value) { - final long mask2 = mask | (1L << hash6); - final K[] keys2 = insertElement(keys, key, slot); - final Object[] subNodes2 = insertElement(subNodes, value, slot); - return new InnerNode<>(mask2, keys2, subNodes2); - } - - @Override - InnerNode put(K key, int hash, int hashBits, V value, MutableValueInt newValue) { - final int hash6 = hash & HASH_MASK; - final int slot = slot(hash6); - - if (exists(hash6)) { - hash >>>= HASH_BITS; - hashBits -= HASH_BITS; - return putExisting(key, hash, hashBits, slot, value, newValue); - } else { - newValue.value = 1; - return putNew(key, hash6, slot, value); - } - } - - private InnerNode removeSlot(int hash6, int slot) { - final long mask2 = mask & ~(1L << hash6); - final K[] keys2 = removeArrayElement(keys, slot); - final Object[] subNodes2 = removeArrayElement(subNodes, slot); - return new InnerNode<>(mask2, keys2, subNodes2); - } - - @Override - InnerNode remove(Object key, int hash) { - final int hash6 = hash & HASH_MASK; - if (exists(hash6) == false) { - return this; - } - final int slot = slot(hash6); - final Object previousValue = subNodes[slot]; - if (previousValue instanceof Node) { - @SuppressWarnings("unchecked") - final Node subNode = (Node) previousValue; - final Node removed = subNode.remove(key, hash >>> HASH_BITS); - if (removed == subNode) { - // not in sub-nodes - return this; - } - if (removed.isEmpty()) { - return removeSlot(hash6, slot); - } - final K[] keys2 = Arrays.copyOf(keys, keys.length); - final Object[] subNodes2 = Arrays.copyOf(subNodes, subNodes.length); - subNodes2[slot] = removed; - return new InnerNode<>(mask, keys2, subNodes2); - } else if (keys[slot].equals(key)) { - // remove entry - return removeSlot(hash6, slot); - } else { - // hash collision, nothing to remove - return this; - } - } - - } - - private static class EntryIterator implements Iterator> { - - private final Deque> entries; - private final Deque> nodes; - - EntryIterator(Node node) { - entries = new ArrayDeque<>(); - nodes = new ArrayDeque<>(); - node.visit(entries, nodes); - } - - @Override - public boolean hasNext() { - return entries.isEmpty() == false || nodes.isEmpty() == false; - } - - @Override - public Map.Entry next() { - while (entries.isEmpty()) { - if (nodes.isEmpty()) { - throw new NoSuchElementException(); - } - final Node nextNode = nodes.pop(); - nextNode.visit(entries, nodes); - } - return entries.pop(); - } - - @Override - public final void remove() { - throw new UnsupportedOperationException(); - } - - } - - private final InnerNode root; - private final int size; - - /** - * Create a new empty map. - */ - public CopyOnWriteHashMap() { - this(new InnerNode(), 0); - } - - private CopyOnWriteHashMap(InnerNode root, int size) { - this.root = root; - this.size = size; - } - - @Override - public boolean containsKey(Object key) { - // works fine since null values are not supported - return get(key) != null; - } - - @Override - public V get(Object key) { - if (key == null) { - throw new IllegalArgumentException("null keys are not supported"); - } - final int hash = key.hashCode(); - return root.get(key, hash); - } - - @Override - public int size() { - assert size != 0 || root.isEmpty(); - return size; - } - - /** - * Associate key with value and return a new copy - * of the hash table. The current hash table is not modified. 
- */ - public CopyOnWriteHashMap copyAndPut(K key, V value) { - if (key == null) { - throw new IllegalArgumentException("null keys are not supported"); - } - if (value == null) { - throw new IllegalArgumentException("null values are not supported"); - } - final int hash = key.hashCode(); - final MutableValueInt newValue = new MutableValueInt(); - final InnerNode newRoot = root.put(key, hash, TOTAL_HASH_BITS, value, newValue); - final int newSize = size + newValue.value; - return new CopyOnWriteHashMap<>(newRoot, newSize); - } - - /** - * Same as {@link #copyAndPut(Object, Object)} but for an arbitrary number of entries. - */ - public CopyOnWriteHashMap copyAndPutAll(Map other) { - return copyAndPutAll(other.entrySet()); - } - - public CopyOnWriteHashMap copyAndPutAll(Iterable> entries) { - CopyOnWriteHashMap result = this; - for (Entry entry : entries) { - result = result.copyAndPut(entry.getKey(), entry.getValue()); - } - return result; - } - - public CopyOnWriteHashMap copyAndPutAll(Stream> entries) { - return copyAndPutAll(entries::iterator); - } - - /** - * Remove the given key from this map. The current hash table is not modified. - */ - public CopyOnWriteHashMap copyAndRemove(Object key) { - if (key == null) { - throw new IllegalArgumentException("null keys are not supported"); - } - final int hash = key.hashCode(); - final InnerNode newRoot = root.remove(key, hash); - if (root == newRoot) { - return this; - } else { - return new CopyOnWriteHashMap<>(newRoot, size - 1); - } - } - - /** - * Same as {@link #copyAndRemove(Object)} but for an arbitrary number of entries. - */ - public CopyOnWriteHashMap copyAndRemoveAll(Collection keys) { - CopyOnWriteHashMap result = this; - for (Object key : keys) { - result = result.copyAndRemove(key); - } - return result; - } - - @Override - public Set> entrySet() { - return new AbstractSet>() { - - @Override - public Iterator> iterator() { - return new EntryIterator<>(root); - } - - @Override - public boolean contains(Object o) { - if (o == null || (o instanceof Map.Entry) == false) { - return false; - } - Map.Entry entry = (java.util.Map.Entry) o; - return entry.getValue().equals(CopyOnWriteHashMap.this.get(entry.getKey())); - } - - @Override - public int size() { - return CopyOnWriteHashMap.this.size(); - } - }; - } - -} diff --git a/server/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java b/server/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java deleted file mode 100644 index 1a7f2f9608450..0000000000000 --- a/server/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.common.collect; - -import org.elasticsearch.test.ESTestCase; - -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.emptyMap; - -public class CopyOnWriteHashMapTests extends ESTestCase { - - private static class O { - - private final int value, hashCode; - - O(int value, int hashCode) { - super(); - this.value = value; - this.hashCode = hashCode; - } - - @Override - public int hashCode() { - return hashCode; - } - - @Override - public boolean equals(Object obj) { - if (obj == null || (obj instanceof O) == false) { - return false; - } - return value == ((O) obj).value; - } - } - - public void testDuel() { - final int iters = scaledRandomIntBetween(2, 5); - for (int iter = 0; iter < iters; ++iter) { - final int valueBits = randomIntBetween(1, 30); - final int hashBits = randomInt(valueBits); - // we compute the total number of ops based on the bits of the hash - // since the test is much heavier when few bits are used for the hash - final int numOps = randomInt(10 + hashBits * 100); - - Map ref = new HashMap<>(); - CopyOnWriteHashMap map = new CopyOnWriteHashMap<>(); - assertEquals(ref, map); - final int hashBase = randomInt(); - for (int i = 0; i < numOps; ++i) { - final int v = randomInt(1 << valueBits); - final int h = (v & ((1 << hashBits) - 1)) ^ hashBase; - O key = new O(v, h); - - Map newRef = new HashMap<>(ref); - final CopyOnWriteHashMap newMap; - - if (randomBoolean()) { - // ADD - Integer value = v; - newRef.put(key, value); - newMap = map.copyAndPut(key, value); - } else { - // REMOVE - final Integer removed = newRef.remove(key); - newMap = map.copyAndRemove(key); - if (removed == null) { - assertSame(map, newMap); - } - } - - assertEquals(ref, map); // make sure that the old copy has not been modified - assertEquals(newRef, newMap); - assertEquals(newMap, newRef); - - ref = newRef; - map = newMap; - } - assertEquals(ref, CopyOnWriteHashMap.copyOf(ref)); - assertEquals(emptyMap(), CopyOnWriteHashMap.copyOf(ref).copyAndRemoveAll(ref.keySet())); - } - } - - public void testCollision() { - CopyOnWriteHashMap map = new CopyOnWriteHashMap<>(); - map = map.copyAndPut(new O(3, 0), 2); - assertEquals((Integer) 2, map.get(new O(3, 0))); - assertNull(map.get(new O(5, 0))); - - map = map.copyAndPut(new O(5, 0), 5); - assertEquals((Integer) 2, map.get(new O(3, 0))); - assertEquals((Integer) 5, map.get(new O(5, 0))); - - map = map.copyAndRemove(new O(3, 0)); - assertNull(map.get(new O(3, 0))); - assertEquals((Integer) 5, map.get(new O(5, 0))); - - map = map.copyAndRemove(new O(5, 0)); - assertNull(map.get(new O(3, 0))); - assertNull(map.get(new O(5, 0))); - } - - public void testUnsupportedAPIs() { - try { - new CopyOnWriteHashMap<>().put("a", "b"); - fail(); - } catch (UnsupportedOperationException e) { - // expected - } - - try { - new CopyOnWriteHashMap<>().copyAndPut("a", "b").remove("a"); - fail(); - } catch (UnsupportedOperationException e) { - // expected - } - } - - public void testUnsupportedValues() { - try { - new CopyOnWriteHashMap<>().copyAndPut("a", null); - fail(); - } catch (IllegalArgumentException e) { - // expected - } - - try { - new CopyOnWriteHashMap<>().copyAndPut(null, "b"); - fail(); - } catch (IllegalArgumentException e) { - // expected - } - } - -} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 2b7c8688ff8b1..0ee1da85eb990 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.CopyOnWriteHashMap; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.settings.Settings; @@ -258,13 +257,13 @@ void updateAutoFollowers(ClusterState followerClusterState) { this.patterns = Set.copyOf(autoFollowMetadata.getPatterns().keySet()); - final CopyOnWriteHashMap autoFollowersCopy = CopyOnWriteHashMap.copyOf(this.autoFollowers); + final Map currentAutoFollowers = Map.copyOf(this.autoFollowers); Set newRemoteClusters = autoFollowMetadata.getPatterns() .values() .stream() .filter(AutoFollowPattern::isActive) .map(AutoFollowPattern::getRemoteCluster) - .filter(remoteCluster -> autoFollowersCopy.containsKey(remoteCluster) == false) + .filter(remoteCluster -> currentAutoFollowers.containsKey(remoteCluster) == false) .collect(Collectors.toSet()); Map newAutoFollowers = Maps.newMapWithExpectedSize(newRemoteClusters.size()); @@ -344,7 +343,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) } List removedRemoteClusters = new ArrayList<>(); - for (Map.Entry entry : autoFollowersCopy.entrySet()) { + for (Map.Entry entry : currentAutoFollowers.entrySet()) { String remoteCluster = entry.getKey(); AutoFollower autoFollower = entry.getValue(); boolean exist = autoFollowMetadata.getPatterns() @@ -365,7 +364,11 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) } } assert assertNoOtherActiveAutoFollower(newAutoFollowers); - this.autoFollowers = autoFollowersCopy.copyAndPutAll(newAutoFollowers).copyAndRemoveAll(removedRemoteClusters); + + Map updatedFollowers = new HashMap<>(currentAutoFollowers); + updatedFollowers.putAll(newAutoFollowers); + removedRemoteClusters.forEach(updatedFollowers.keySet()::remove); + this.autoFollowers = Collections.unmodifiableMap(updatedFollowers); } private boolean assertNoOtherActiveAutoFollower(Map newAutoFollowers) { From d3fb0149144945d58429e141fd375201612606d9 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 25 Jan 2022 13:51:42 -0500 Subject: [PATCH 026/100] [DOCS] Reuse multi-level `join` warning (#82976) Updates and reuses a warning against creating multi-level `join` fields to make it more prominent. The current warning is low on the page, where some users may not seeing until they've already begun mapping fields. Closes https://github.com/elastic/elasticsearch/issues/82818. --- docs/reference/mapping/types/parent-join.asciidoc | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/docs/reference/mapping/types/parent-join.asciidoc b/docs/reference/mapping/types/parent-join.asciidoc index 6c0aa0c36b1cc..54feec64c24b1 100644 --- a/docs/reference/mapping/types/parent-join.asciidoc +++ b/docs/reference/mapping/types/parent-join.asciidoc @@ -8,6 +8,14 @@ The `join` data type is a special field that creates parent/child relation within documents of the same index. The `relations` section defines a set of possible relations within the documents, each relation being a parent name and a child name. 
+ +// tag::multi-level-join-warning[] +WARNING: We don't recommend using multiple levels of relations to replicate a +relational model. Each level of relation adds an overhead at query time in terms +of memory and computation. For better search performance, denormalize your data +instead. +// end::multi-level-join-warning[] + A parent/child relation can be defined as follows: [source,console] ---- PUT my-index-000001 @@ -426,9 +434,7 @@ PUT my-index-000001 ==== Multiple levels of parent join -WARNING: Using multiple levels of relations to replicate a relational model is not recommended. -Each level of relation adds an overhead at query time in terms of memory and computation. -You should de-normalize your data if you care about performance. +include::parent-join.asciidoc[tag=multi-level-join-warning] Multiple levels of parent/child: From fbcc9d5f22f40ad2fb3a831dd5770cc5ebe62ee8 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Tue, 25 Jan 2022 21:27:01 +0200 Subject: [PATCH 027/100] Bind to non-localhost for transport in some cases (#82973) When enrolling a new node to an existing cluster, we sometimes need to bind transport to non-localhost addresses. If the other nodes of the cluster are on different hosts than this node, then the default configuration of binding the transport layer to localhost will prevent this node from joining the cluster even after "successful" enrollment. We check the non-localhost transport addresses that we receive during enrollment and if any of these are not in the list of non-localhost IP addresses that we gather from all interfaces of the current host, we assume that at least some other node in the cluster runs on another host. --- docs/changelog/82973.yaml | 5 +++ .../xpack/security/cli/AutoConfigureNode.java | 36 ++++++++++++++++- .../security/cli/AutoConfigureNodeTests.java | 40 +++++++++++++++++++ .../InitialNodeSecurityAutoConfiguration.java | 8 ++-- 4 files changed, 84 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/82973.yaml diff --git a/docs/changelog/82973.yaml b/docs/changelog/82973.yaml new file mode 100644 index 0000000000000..d815d7e8968ae --- /dev/null +++ b/docs/changelog/82973.yaml @@ -0,0 +1,5 @@ +pr: 82973 +summary: Bind to non-localhost for transport in some cases +area: Security +type: bug +issues: [] diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java index ef3901cf5b9b5..f0a004af88f65 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java @@ -54,7 +54,10 @@ import java.io.OutputStream; import java.io.OutputStreamWriter; import java.net.InetAddress; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.LinkOption; @@ -799,7 +802,11 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th bw.newLine(); bw.write("# Connections are encrypted and mutually authenticated"); bw.newLine(); - bw.write("#" + TransportSettings.HOST.getKey() + ": " + hostSettingValue(NetworkUtils.getAllAddresses())); + if (false == inEnrollmentMode + || false == anyRemoteHostNodeAddress(transportAddresses, NetworkUtils.getAllAddresses())) { + bw.write("#"); + } +
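// Aside on the conditional "#" written just above: the "#" keeps the
// transport.host line commented out in the generated elasticsearch.yml, so:
//   - not in enrollment mode                   -> line stays commented (localhost default)
//   - enrolling, all peers on this same host   -> line stays commented
//   - enrolling, some peer on a different host -> line is emitted uncommented, and the
//     transport layer binds beyond localhost so the node can reach the cluster.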
bw.write(TransportSettings.HOST.getKey() + ": " + hostSettingValue(NetworkUtils.getAllAddresses())); bw.newLine(); } bw.newLine(); @@ -846,6 +853,33 @@ private String initialMasterNodesSettingValue(Environment environment) { return "[\"${HOSTNAME}\"]"; } + /** + * Determines if a node that is enrolling to an existing cluster is on a different host than the other nodes of the + * cluster. If this is the case, then the default configuration of + * binding the transport layer to localhost will prevent this node from joining the cluster even after "successful" enrollment. + * We check the non-localhost transport addresses that we receive during enrollment and if any of these are not in the + * list of non-localhost IP addresses that we gather from all interfaces of the current host, we assume that at least + * some other node in the cluster runs on another host. + * If the transport layer addresses we found out in enrollment are all localhost, we cannot be sure whether we are still + * on the same host, but we assume that we are, as that is the safer choice, and do not bind to non-localhost for this node either. + */ + protected static boolean anyRemoteHostNodeAddress(List<String> allNodesTransportPublishAddresses, InetAddress[] allHostAddresses) { + final List<InetAddress> allAddressesList = Arrays.asList(allHostAddresses); + for (String nodeStringAddress : allNodesTransportPublishAddresses) { + try { + final URI uri = new URI("http://" + nodeStringAddress); + final InetAddress nodeAddress = InetAddress.getByName(uri.getHost()); + if (false == nodeAddress.isLoopbackAddress() && false == allAddressesList.contains(nodeAddress)) { + // this node's address is on a remote host + return true; + } + } catch (URISyntaxException | UnknownHostException e) { + // we could fail here but if any of the transport addresses are usable, we can join the cluster + } + } + return false; + } + protected String hostSettingValue(InetAddress[] allAddresses) { if (Arrays.stream(allAddresses).anyMatch(InetAddress::isSiteLocalAddress)) { return "[_local_, _site_]"; diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/AutoConfigureNodeTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/AutoConfigureNodeTests.java index eea4a9ddaf33c..8f3c1c7f6bce8 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/AutoConfigureNodeTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/AutoConfigureNodeTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.net.InetAddress; import java.nio.file.Files; import java.nio.file.Path; import java.security.KeyStore; @@ -31,7 +32,9 @@ import java.util.List; import static java.nio.file.StandardOpenOption.CREATE_NEW; +import static org.elasticsearch.xpack.security.cli.AutoConfigureNode.anyRemoteHostNodeAddress; import static org.elasticsearch.xpack.security.cli.AutoConfigureNode.removePreviousAutoconfiguration; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; public class AutoConfigureNodeTests extends ESTestCase { @@ -210,6 +213,43 @@ public void testGeneratedHTTPCertificateSANs() throws Exception { } } + public void testAnyRemoteHostNodeAddress() throws Exception { + List<String> remoteAddresses = List.of("192.168.0.1:9300", "127.0.0.1:9300"); + InetAddress[] localAddresses = new InetAddress[] { InetAddress.getByName("192.168.0.1"), InetAddress.getByName("127.0.0.1") }; +
assertThat(anyRemoteHostNodeAddress(remoteAddresses, localAddresses), equalTo(false)); + + remoteAddresses = List.of("192.168.0.1:9300", "127.0.0.1:9300", "[::1]:9300"); + localAddresses = new InetAddress[] { InetAddress.getByName("192.168.0.1"), InetAddress.getByName("127.0.0.1") }; + assertThat(anyRemoteHostNodeAddress(remoteAddresses, localAddresses), equalTo(false)); + + remoteAddresses = List.of("192.168.0.1:9300", "127.0.0.1:9300", "[::1]:9300"); + localAddresses = new InetAddress[] { + InetAddress.getByName("192.168.0.1"), + InetAddress.getByName("127.0.0.1"), + InetAddress.getByName("10.0.0.1") }; + assertThat(anyRemoteHostNodeAddress(remoteAddresses, localAddresses), equalTo(false)); + + remoteAddresses = List.of("192.168.0.1:9300", "127.0.0.1:9300", "[::1]:9300", "10.0.0.1:9301"); + localAddresses = new InetAddress[] { InetAddress.getByName("192.168.0.1"), InetAddress.getByName("127.0.0.1") }; + assertThat(anyRemoteHostNodeAddress(remoteAddresses, localAddresses), equalTo(true)); + + remoteAddresses = List.of("127.0.0.1:9300", "[::1]:9300"); + localAddresses = new InetAddress[] { InetAddress.getByName("[::1]"), InetAddress.getByName("127.0.0.1") }; + assertThat(anyRemoteHostNodeAddress(remoteAddresses, localAddresses), equalTo(false)); + + remoteAddresses = List.of("127.0.0.1:9300", "[::1]:9300"); + localAddresses = new InetAddress[] { InetAddress.getByName("192.168.2.3") }; + assertThat(anyRemoteHostNodeAddress(remoteAddresses, localAddresses), equalTo(false)); + + remoteAddresses = List.of("1.2.3.4:9300"); + localAddresses = new InetAddress[] { InetAddress.getByName("[::1]"), InetAddress.getByName("127.0.0.1") }; + assertThat(anyRemoteHostNodeAddress(remoteAddresses, localAddresses), equalTo(true)); + + remoteAddresses = List.of(); + localAddresses = new InetAddress[] { InetAddress.getByName("192.168.0.1"), InetAddress.getByName("127.0.0.1") }; + assertThat(anyRemoteHostNodeAddress(remoteAddresses, localAddresses), equalTo(false)); + } + private boolean checkGeneralNameSan(X509Certificate certificate, String generalName, int generalNameTag) throws Exception { for (List san : certificate.getSubjectAlternativeNames()) { if (san.get(0).equals(generalNameTag) && san.get(1).equals(generalName)) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java index cdc93a9c48a6d..fb1c58edf4cf7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java @@ -254,7 +254,7 @@ private static void outputInformationToConsole( } else if (false == Strings.isEmpty(elasticPassword)) { builder.append( infoBullet - + " Password for the " + + " Password for the " + boldOnANSI + "elastic" + boldOffANSI @@ -271,7 +271,7 @@ private static void outputInformationToConsole( builder.append(System.lineSeparator()); if (null != caCertFingerprint) { - builder.append(infoBullet + " HTTP CA certificate SHA-256 fingerprint:"); + builder.append(infoBullet + " HTTP CA certificate SHA-256 fingerprint:"); builder.append(System.lineSeparator()); builder.append(" " + boldOnANSI + caCertFingerprint + boldOffANSI); } @@ -279,7 +279,7 @@ private static void outputInformationToConsole( builder.append(System.lineSeparator()); if (null != kibanaEnrollmentToken) { - 
builder.append(infoBullet + " Configure Kibana to use this cluster:"); + builder.append(infoBullet + " Configure Kibana to use this cluster:"); builder.append(System.lineSeparator()); builder.append(bullet + " Run Kibana and click the configuration link in the terminal when Kibana starts."); builder.append(System.lineSeparator()); @@ -325,7 +325,7 @@ private static void outputInformationToConsole( + ", using the enrollment token that you generated." ); } else if (Strings.isEmpty(nodeEnrollmentToken)) { - builder.append(infoBullet + " Configure other nodes to join this cluster:"); + builder.append(infoBullet + " Configure other nodes to join this cluster:"); builder.append(System.lineSeparator()); builder.append(bullet + " On this node:"); builder.append(System.lineSeparator()); From 001d0a4e8ac5479ccff72a73f9052509f1f2753f Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Tue, 25 Jan 2022 21:20:54 +0000 Subject: [PATCH 028/100] Try again to fix changelog areas after reorg (#83100) The change in 38daa228474 wasn't sufficient, apply correct values. --- .../src/main/resources/changelog-schema.json | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/build-tools-internal/src/main/resources/changelog-schema.json b/build-tools-internal/src/main/resources/changelog-schema.json index c58107caa3cdf..5fb237cd7c964 100644 --- a/build-tools-internal/src/main/resources/changelog-schema.json +++ b/build-tools-internal/src/main/resources/changelog-schema.json @@ -25,30 +25,24 @@ "Authentication", "Authorization", "Autoscaling", + "CAT APIs", "CCR", "CRUD", "Client", "Cluster Coordination", + "Data streams", "Discovery-Plugins", "Distributed", "EQL", "Engine", "FIPS", - "Data Management/CAT APIs", - "Data Management/Data streams", - "Data Management/Features", - "Data Management/ILM+SLM", - "Data Management/Indices APIs", - "Data Management/Ingest", - "Data Management/Java High Level REST Client", - "Data Management/Java Low Level REST Client", - "Data Management/Monitoring", - "Data Management/Stats", - "Data Management/Watcher", + "Features", "Geo", "Graph", "Highlighting", + "ILM+SLM", "IdentityProvider", + "Indices APIs", "Infra/CLI", "Infra/Circuit Breakers", "Infra/Core", @@ -60,9 +54,13 @@ "Infra/Scripting", "Infra/Settings", "Infra/Transport API", + "Ingest", + "Java High Level REST Client", + "Java Low Level REST Client", "License", "Machine Learning", "Mapping", + "Monitoring", "Network", "Packaging", "Percolator", @@ -76,11 +74,13 @@ "Search", "Security", "Snapshot/Restore", + "Stats", "Store", "Suggesters", "TLS", "Task Management", - "Transform" + "Transform", + "Watcher" ] }, "type": { From eda391a7edd99b700b5ec6ce8485667c1d6fb3ba Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 25 Jan 2022 16:32:56 -0500 Subject: [PATCH 029/100] [DOCS] Delete pipeline containing stored script (#83102) Adds a hidden snippet to delete the pipeline containing a stored script for cleanup. Relates to https://github.com/elastic/elasticsearch/issues/83097. 
--- docs/reference/ingest.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index 4c4b1b5872f16..d5c31ab3236cc 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -883,6 +883,7 @@ PUT _ingest/pipeline/my-pipeline [source,console] ---- DELETE _scripts/my-prod-tag-script +DELETE _ingest/pipeline/my-pipeline ---- // TEST[continued] //// From 4bf8aecab6169ace068bd53610d5510974832df9 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Tue, 25 Jan 2022 16:42:38 -0500 Subject: [PATCH 030/100] Batch Index Settings Update Requests (#82896) --- docs/changelog/82896.yaml | 6 + .../MetadataUpdateSettingsService.java | 276 +++++++++--------- .../indices/cluster/ClusterStateChanges.java | 20 +- 3 files changed, 167 insertions(+), 135 deletions(-) create mode 100644 docs/changelog/82896.yaml diff --git a/docs/changelog/82896.yaml b/docs/changelog/82896.yaml new file mode 100644 index 0000000000000..8267ce5a55594 --- /dev/null +++ b/docs/changelog/82896.yaml @@ -0,0 +1,6 @@ +pr: 82896 +summary: Batch Index Settings Update Requests +area: Cluster Coordination +type: enhancement +issues: + - 79866 diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index ce6843c71bc89..ae06e6f6f9636 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.routing.RoutingTable; @@ -50,13 +51,12 @@ public class MetadataUpdateSettingsService { private static final Logger logger = LogManager.getLogger(MetadataUpdateSettingsService.class); private final ClusterService clusterService; - private final AllocationService allocationService; - private final IndexScopedSettings indexScopedSettings; private final IndicesService indicesService; private final ShardLimitValidator shardLimitValidator; private final ThreadPool threadPool; + private final ClusterStateTaskExecutor executor; public MetadataUpdateSettingsService( ClusterService clusterService, @@ -67,11 +67,28 @@ public MetadataUpdateSettingsService( ThreadPool threadPool ) { this.clusterService = clusterService; - this.threadPool = threadPool; this.allocationService = allocationService; this.indexScopedSettings = indexScopedSettings; this.indicesService = indicesService; this.shardLimitValidator = shardLimitValidator; + this.threadPool = threadPool; + this.executor = (currentState, tasks) -> { + ClusterTasksResult.Builder builder = ClusterTasksResult.builder(); + ClusterState state = currentState; + for (AckedClusterStateUpdateTask task : tasks) { + try { + state = task.execute(state); + builder.success(task); + } catch (Exception e) { + builder.failure(task, e); + } + } + if (state != currentState) { + // reroute in case things change that require it (like number of replicas) + state = allocationService.reroute(state, "settings update"); + } + return builder.build(state); + }; } public void 
updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener listener) { @@ -105,149 +122,149 @@ public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request final Settings openSettings = settingsForOpenIndices.build(); final boolean preserveExisting = request.isPreserveExisting(); - clusterService.submitStateUpdateTask( - "update-settings " + Arrays.toString(request.indices()), - new AckedClusterStateUpdateTask(Priority.URGENT, request, wrapPreservingContext(listener, threadPool.getThreadContext())) { + // TODO: move this to custom class instead of AckedClusterStateUpdateTask + AckedClusterStateUpdateTask clusterTask = new AckedClusterStateUpdateTask( + Priority.URGENT, + request, + wrapPreservingContext(listener, threadPool.getThreadContext()) + ) { + @Override + public ClusterState execute(ClusterState currentState) { + RoutingTable.Builder routingTableBuilder = null; + Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); - @Override - public ClusterState execute(ClusterState currentState) { - - RoutingTable.Builder routingTableBuilder = null; - Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); - - // allow to change any settings to a close index, and only allow dynamic settings to be changed - // on an open index - Set openIndices = new HashSet<>(); - Set closeIndices = new HashSet<>(); - final String[] actualIndices = new String[request.indices().length]; - for (int i = 0; i < request.indices().length; i++) { - Index index = request.indices()[i]; - actualIndices[i] = index.getName(); - final IndexMetadata metadata = currentState.metadata().getIndexSafe(index); - if (metadata.getState() == IndexMetadata.State.OPEN) { - openIndices.add(index); - } else { - closeIndices.add(index); - } + // allow to change any settings to a closed index, and only allow dynamic settings to be changed + // on an open index + Set openIndices = new HashSet<>(); + Set closedIndices = new HashSet<>(); + final String[] actualIndices = new String[request.indices().length]; + for (int i = 0; i < request.indices().length; i++) { + Index index = request.indices()[i]; + actualIndices[i] = index.getName(); + final IndexMetadata metadata = currentState.metadata().getIndexSafe(index); + if (metadata.getState() == IndexMetadata.State.OPEN) { + openIndices.add(index); + } else { + closedIndices.add(index); } + } - if (skippedSettings.isEmpty() == false && openIndices.isEmpty() == false) { - throw new IllegalArgumentException( - String.format( - Locale.ROOT, - "Can't update non dynamic settings [%s] for open indices %s", - skippedSettings, - openIndices - ) - ); - } + if (skippedSettings.isEmpty() == false && openIndices.isEmpty() == false) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Can't update non dynamic settings [%s] for open indices %s", + skippedSettings, + openIndices + ) + ); + } - if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) { - final int updatedNumberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(openSettings); - if (preserveExisting == false) { - // Verify that this won't take us over the cluster shard limit. 
- shardLimitValidator.validateShardLimitOnReplicaUpdate(currentState, request.indices(), updatedNumberOfReplicas); + if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) { + final int updatedNumberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(openSettings); + if (preserveExisting == false) { + // Verify that this won't take us over the cluster shard limit. + shardLimitValidator.validateShardLimitOnReplicaUpdate(currentState, request.indices(), updatedNumberOfReplicas); - /* - * We do not update the in-sync allocation IDs as they will be removed upon the first index operation - * which makes these copies stale. - * - * TODO: should we update the in-sync allocation IDs once the data is deleted by the node? - */ - routingTableBuilder = RoutingTable.builder(currentState.routingTable()); - routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); - metadataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); - logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices); - } + /* + * We do not update the in-sync allocation IDs as they will be removed upon the first index operation + * which makes these copies stale. + * + * TODO: should we update the in-sync allocation IDs once the data is deleted by the node? + */ + routingTableBuilder = RoutingTable.builder(currentState.routingTable()); + routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); + metadataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); + logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices); } + } - updateIndexSettings( - openIndices, - metadataBuilder, - (index, indexSettings) -> indexScopedSettings.updateDynamicSettings( - openSettings, - indexSettings, - Settings.builder(), - index.getName() - ), - preserveExisting, - indexScopedSettings - ); + updateIndexSettings( + openIndices, + metadataBuilder, + (index, indexSettings) -> indexScopedSettings.updateDynamicSettings( + openSettings, + indexSettings, + Settings.builder(), + index.getName() + ), + preserveExisting, + indexScopedSettings + ); - updateIndexSettings( - closeIndices, - metadataBuilder, - (index, indexSettings) -> indexScopedSettings.updateSettings( - closedSettings, - indexSettings, - Settings.builder(), - index.getName() - ), - preserveExisting, - indexScopedSettings - ); + updateIndexSettings( + closedIndices, + metadataBuilder, + (index, indexSettings) -> indexScopedSettings.updateSettings( + closedSettings, + indexSettings, + Settings.builder(), + index.getName() + ), + preserveExisting, + indexScopedSettings + ); - if (IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.exists(normalizedSettings) - || IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(normalizedSettings)) { - for (String index : actualIndices) { - final Settings settings = metadataBuilder.get(index).getSettings(); - MetadataCreateIndexService.validateTranslogRetentionSettings(settings); - MetadataCreateIndexService.validateStoreTypeSetting(settings); - } + if (IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.exists(normalizedSettings) + || IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(normalizedSettings)) { + for (String index : actualIndices) { + final Settings settings = metadataBuilder.get(index).getSettings(); + MetadataCreateIndexService.validateTranslogRetentionSettings(settings); + 
MetadataCreateIndexService.validateStoreTypeSetting(settings); } - boolean changed = false; - // increment settings versions - for (final String index : actualIndices) { - if (same(currentState.metadata().index(index).getSettings(), metadataBuilder.get(index).getSettings()) == false) { - changed = true; - final IndexMetadata.Builder builder = IndexMetadata.builder(metadataBuilder.get(index)); - builder.settingsVersion(1 + builder.settingsVersion()); - metadataBuilder.put(builder); - } + } + boolean changed = false; + // increment settings versions + for (final String index : actualIndices) { + if (same(currentState.metadata().index(index).getSettings(), metadataBuilder.get(index).getSettings()) == false) { + changed = true; + final IndexMetadata.Builder builder = IndexMetadata.builder(metadataBuilder.get(index)); + builder.settingsVersion(1 + builder.settingsVersion()); + metadataBuilder.put(builder); } + } - final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - boolean changedBlocks = false; - for (IndexMetadata.APIBlock block : IndexMetadata.APIBlock.values()) { - changedBlocks |= maybeUpdateClusterBlock(actualIndices, blocks, block.block, block.setting, openSettings); - } - changed |= changedBlocks; + final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); + boolean changedBlocks = false; + for (IndexMetadata.APIBlock block : IndexMetadata.APIBlock.values()) { + changedBlocks |= maybeUpdateClusterBlock(actualIndices, blocks, block.block, block.setting, openSettings); + } + changed |= changedBlocks; - if (changed == false) { - return currentState; - } + if (changed == false) { + return currentState; + } - ClusterState updatedState = ClusterState.builder(currentState) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder == null ? currentState.routingTable() : routingTableBuilder.build()) - .blocks(changedBlocks ? blocks.build() : currentState.blocks()) - .build(); + ClusterState updatedState = ClusterState.builder(currentState) + .metadata(metadataBuilder) + .routingTable(routingTableBuilder == null ? currentState.routingTable() : routingTableBuilder.build()) + .blocks(changedBlocks ? blocks.build() : currentState.blocks()) + .build(); - // now, reroute in case things change that require it (like number of replicas) - updatedState = allocationService.reroute(updatedState, "settings update"); - try { - for (Index index : openIndices) { - final IndexMetadata currentMetadata = currentState.getMetadata().getIndexSafe(index); - final IndexMetadata updatedMetadata = updatedState.metadata().getIndexSafe(index); - indicesService.verifyIndexMetadata(currentMetadata, updatedMetadata); - } - for (Index index : closeIndices) { - final IndexMetadata currentMetadata = currentState.getMetadata().getIndexSafe(index); - final IndexMetadata updatedMetadata = updatedState.metadata().getIndexSafe(index); - // Verifies that the current index settings can be updated with the updated dynamic settings. - indicesService.verifyIndexMetadata(currentMetadata, updatedMetadata); - // Now check that we can create the index with the updated settings (dynamic and non-dynamic). - // This step is mandatory since we allow to update non-dynamic settings on closed indices. 
- indicesService.verifyIndexMetadata(updatedMetadata, updatedMetadata); - } - } catch (IOException ex) { - throw ExceptionsHelper.convertToElastic(ex); + try { + for (Index index : openIndices) { + final IndexMetadata currentMetadata = currentState.metadata().getIndexSafe(index); + final IndexMetadata updatedMetadata = updatedState.metadata().getIndexSafe(index); + indicesService.verifyIndexMetadata(currentMetadata, updatedMetadata); } - return updatedState; + for (Index index : closedIndices) { + final IndexMetadata currentMetadata = currentState.metadata().getIndexSafe(index); + final IndexMetadata updatedMetadata = updatedState.metadata().getIndexSafe(index); + // Verifies that the current index settings can be updated with the updated dynamic settings. + indicesService.verifyIndexMetadata(currentMetadata, updatedMetadata); + // Now check that we can create the index with the updated settings (dynamic and non-dynamic). + // This step is mandatory since we allow to update non-dynamic settings on closed indices. + indicesService.verifyIndexMetadata(updatedMetadata, updatedMetadata); + } + } catch (IOException ex) { + throw ExceptionsHelper.convertToElastic(ex); } - }, - ClusterStateTaskExecutor.unbatched() - ); + + return updatedState; + } + }; + + clusterService.submitStateUpdateTask("update-settings " + Arrays.toString(request.indices()), clusterTask, this.executor); } public static void updateIndexSettings( @@ -256,7 +273,6 @@ public static void updateIndexSettings( BiFunction settingUpdater, Boolean preserveExisting, IndexScopedSettings indexScopedSettings - ) { for (Index index : indices) { IndexMetadata indexMetadata = metadataBuilder.getSafe(index); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 8a98a3dfe6762..66efd2a708e53 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -38,6 +38,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; +import org.elasticsearch.cluster.ClusterStateTaskExecutor.TaskResult; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.EmptyClusterInfoService; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -440,7 +441,7 @@ public ClusterState applyStartedShards(ClusterState clusterState, Map ClusterState runTasks(ClusterStateTaskExecutor executor, ClusterState clusterState, List entries) { try { ClusterTasksResult result = executor.execute(clusterState, entries); - for (ClusterStateTaskExecutor.TaskResult taskResult : result.executionResults().values()) { + for (TaskResult taskResult : result.executionResults().values()) { if (taskResult.isSuccess() == false) { throw taskResult.getFailure(); } @@ -465,16 +466,25 @@ private , Response extends ActionResp }); } + @SuppressWarnings("unchecked") private ClusterState executeClusterStateUpdateTask(ClusterState state, Runnable runnable) { - ClusterState[] result = new ClusterState[1]; + ClusterState[] resultingState = new ClusterState[1]; doAnswer(invocationOnMock -> { ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocationOnMock.getArguments()[1]; - result[0] = task.execute(state); + ClusterStateTaskExecutor executor = (ClusterStateTaskExecutor) 
invocationOnMock + .getArguments()[2]; + ClusterTasksResult result = executor.execute(state, List.of(task)); + for (TaskResult taskResult : result.executionResults().values()) { + if (taskResult.isSuccess() == false) { + throw taskResult.getFailure(); + } + } + resultingState[0] = result.resultingState(); return null; }).when(clusterService).submitStateUpdateTask(anyString(), any(ClusterStateUpdateTask.class), any()); runnable.run(); - assertThat(result[0], notNullValue()); - return result[0]; + assertThat(resultingState[0], notNullValue()); + return resultingState[0]; } private ActionListener createTestListener() { From 30b8cdc18409baf46cb4996ec11d8c6f5418d64e Mon Sep 17 00:00:00 2001 From: Toby Sutor <55087308+toby-sutor@users.noreply.github.com> Date: Tue, 25 Jan 2022 23:03:16 +0100 Subject: [PATCH 031/100] [DOCS] Add note that log4j customization is outside the support scope (#82668) * [docs] Add note to Elasticsearch logging that log4j customization is out of support scope * Update language for default logging recommendation Co-authored-by: Adam Locke Co-authored-by: Quin Hoxie Co-authored-by: Adam Locke --- docs/reference/setup/logging-config.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index d744b61491c9a..8ef1e7223eaf2 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -14,6 +14,9 @@ If you run {es} from the command line, {es} prints logs to the standard output [[loggin-configuration]] === Logging configuration +IMPORTANT: Elastic strongly recommends using the Log4j 2 configuration that is shipped by default. + + Elasticsearch uses https://logging.apache.org/log4j/2.x/[Log4j 2] for logging. Log4j 2 can be configured using the log4j2.properties file. Elasticsearch exposes three properties, `${sys:es.logs.base_path}`, From 706281aa78a528f319da05aef5e6086b94dd8184 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 26 Jan 2022 09:58:32 +0100 Subject: [PATCH 032/100] Support GKE Workload Identity for Searchable Snapshots (#82974) * Support GKE Workload Identity for Searchable Snapshots Searchable snapshots perform naked calls to `GoogleCloudStorageBlobContainer#readBlob` without the Security Manager. The client fails to get Compute Engine credentials because of that. It works for normal snapshot/restore because they do a privileged call to `GoogleCloudStorageBlobStore.writeBlob` during the verification of the repo. The simplest fix is just to make sure `ServiceOptions.getDefaultProjectId` and `GoogleCredentials::getApplicationDefault` get called under the SecurityManager (which they should, because they perform network calls). Unfortunately, we can't write an integration test for the issue, because the test framework does the repo verification automatically, which works around the bug. Writing a unit test does not seem possible either, because `ComputeEngineCredentials#getMetadataServerUrl` relies on the `GCE_METADATA_HOST` environment variable.
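To make the pattern concrete: the fix routes the network-touching credential lookups through a doPrivileged block so the plugin's own policy grants apply even when the call originates from unprivileged code paths such as searchable-snapshot reads. The sketch below is a minimal, illustrative stand-in for the plugin's SocketAccess helper; the class name PrivilegedHelper and its scaffolding are assumptions for this example, not the actual implementation.

import java.io.IOException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

final class PrivilegedHelper {

    private PrivilegedHelper() {}

    // Runs the action with the permissions granted to this codebase,
    // regardless of any unprivileged callers further up the stack.
    static <T> T doPrivilegedIOException(PrivilegedExceptionAction<T> action) throws IOException {
        try {
            return AccessController.doPrivileged(action);
        } catch (PrivilegedActionException e) {
            // Callers are expected to pass actions that only throw IOException,
            // so unwrapping to IOException is safe under that assumption.
            throw (IOException) e.getCause();
        }
    }
}

With such a helper, a credentials lookup can be passed in as a method reference, e.g. doPrivilegedIOException(GoogleCredentials::getApplicationDefault), which is exactly the shape of the change in the diff below.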
See https://github.com/elastic/cloud-on-k8s/issues/5230 Resolves #82702 --- docs/changelog/82974.yaml | 6 ++++++ .../repositories/gcs/GoogleCloudStorageService.java | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/82974.yaml diff --git a/docs/changelog/82974.yaml b/docs/changelog/82974.yaml new file mode 100644 index 0000000000000..550fa7d757cd2 --- /dev/null +++ b/docs/changelog/82974.yaml @@ -0,0 +1,6 @@ +pr: 82974 +summary: Support GKE Workload Identity for Searchable Snapshots +area: Snapshot/Restore +type: bug +issues: + - 82702 diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index 42de547f504ae..1e0e27d4fa4a3 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -195,7 +195,7 @@ StorageOptions createStorageOptions( } else { String defaultProjectId = null; try { - defaultProjectId = ServiceOptions.getDefaultProjectId(); + defaultProjectId = SocketAccess.doPrivilegedIOException(ServiceOptions::getDefaultProjectId); if (defaultProjectId != null) { storageOptionsBuilder.setProjectId(defaultProjectId); } @@ -219,7 +219,7 @@ StorageOptions createStorageOptions( } if (gcsClientSettings.getCredential() == null) { try { - storageOptionsBuilder.setCredentials(GoogleCredentials.getApplicationDefault()); + storageOptionsBuilder.setCredentials(SocketAccess.doPrivilegedIOException(GoogleCredentials::getApplicationDefault)); } catch (Exception e) { logger.warn("failed to load Application Default Credentials", e); } From af93c9aea7126290d2022b35cc151a9f61ff72a6 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 26 Jan 2022 09:18:19 +0000 Subject: [PATCH 033/100] Fix testConcurrentConnectsAndDisconnects (#83098) As mentioned in #77729 each call to `connectToNode` temporarily holds a reference to the connection it just opened, so we must wait for all the `connectToNode` calls to complete before we can assert that the node is no longer connected. However sometimes we run the connection validator in the background, which means the temporary reference lives for longer than the call to `connectToNode`. This commit tracks the lifetime of the temporary reference into the validator. In fact we don't need to track the calls to `connectToNode`, it's enough to wait for every call to the validator to complete. 
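The permit-tracking idea described above can be distilled into a self-contained sketch (names like runAsyncValidation are illustrative, not taken from the test): every in-flight asynchronous callback holds one permit of a semaphore seeded with Integer.MAX_VALUE permits, so re-acquiring all permits at the end proves that every callback, however long-lived, has completed.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

class ValidatorPermitsDemo {
    private static final Semaphore validatorPermits = new Semaphore(Integer.MAX_VALUE);

    static void runAsyncValidation(ExecutorService executor, Runnable onResponse) {
        // Acquire a permit for the lifetime of the callback.
        if (validatorPermits.tryAcquire() == false) {
            throw new AssertionError("too many concurrent validations");
        }
        executor.execute(() -> {
            try {
                onResponse.run();
            } finally {
                validatorPermits.release(); // permit lives exactly as long as the callback
            }
        });
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 1000; i++) {
            runAsyncValidation(executor, () -> {});
        }
        // Blocks until every callback has released its permit (or times out),
        // which is the "wait for every call to the validator" step above.
        boolean allReleased = validatorPermits.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS);
        System.out.println("all callbacks completed: " + allReleased);
        executor.shutdown();
    }
}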
Closes #77728 --- .../ClusterConnectionManagerTests.java | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java index f486659268ee6..a55a00f0135e7 100644 --- a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java @@ -42,8 +42,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Supplier; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; @@ -335,7 +333,6 @@ public void testConcurrentConnects() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/77728") public void testConcurrentConnectsAndDisconnects() throws Exception { final DiscoveryNode node = new DiscoveryNode("", new TransportAddress(InetAddress.getLoopbackAddress(), 0), Version.CURRENT); doAnswer(invocationOnMock -> { @@ -345,24 +342,27 @@ public void testConcurrentConnectsAndDisconnects() throws Exception { return null; }).when(transport).openConnection(eq(node), any(), anyActionListener()); + final Semaphore validatorPermits = new Semaphore(Integer.MAX_VALUE); + final ConnectionManager.ConnectionValidator validator = (c, p, l) -> { - if (randomBoolean()) { - l.onResponse(null); - } else { - threadPool.generic().execute(() -> l.onResponse(null)); - } + assertTrue(validatorPermits.tryAcquire()); + threadPool.executor(randomFrom(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME)).execute(() -> { + try { + l.onResponse(null); + } finally { + validatorPermits.release(); + } + }); }; - final Semaphore pendingConnections = new Semaphore(1000); + final Semaphore pendingConnections = new Semaphore(between(1, 1000)); final int threadCount = between(1, 10); final CountDownLatch countDownLatch = new CountDownLatch(threadCount); - final ReadWriteLock connectCompletionLock = new ReentrantReadWriteLock(); final Runnable action = new Runnable() { @Override public void run() { if (pendingConnections.tryAcquire()) { - assertTrue(connectCompletionLock.readLock().tryLock()); connectionManager.connectToNode(node, null, validator, new ActionListener<>() { @Override public void onResponse(Releasable releasable) { @@ -385,7 +385,6 @@ public void onFailure(Exception e) { } } }); - connectCompletionLock.readLock().unlock(); } else { countDownLatch.countDown(); } @@ -396,9 +395,9 @@ public void onFailure(Exception e) { threadPool.generic().execute(action); } - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - assertTrue(connectCompletionLock.writeLock().tryLock(10, TimeUnit.SECONDS)); - assertFalse(connectionManager.nodeConnected(node)); + assertTrue("threads did not all complete", countDownLatch.await(10, TimeUnit.SECONDS)); + assertTrue("validatorPermits not all released", validatorPermits.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS)); + assertFalse("node still connected", connectionManager.nodeConnected(node)); connectionManager.close(); } From edf07f5471164bbf6ad0de03666bceb32d73289b Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 26 Jan 2022 09:50:23 +0000 Subject: [PATCH 034/100] Reroute after cluster recovery 
(#82856) Today when starting the cluster we open the routing table and reroute in a single cluster state update, which means that this reroute runs before any cluster settings have been applied. This doesn't make much difference today since the initial reroute just triggers async shard fetching so there are no settings which affect it, but it's still surprising and would result in a bug if such a setting were introduced. With this commit we trigger the first reroute in a follow-up cluster state update instead so that any persistent settings are sure to be applied. Closes #82456 --- .../gateway/GatewayServiceIT.java | 146 ++++++++++++++++++ .../elasticsearch/gateway/GatewayService.java | 17 +- .../gateway/GatewayServiceTests.java | 30 +--- .../AbstractCoordinatorTestCase.java | 8 +- 4 files changed, 162 insertions(+), 39 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayServiceIT.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayServiceIT.java new file mode 100644 index 0000000000000..c53beb78361b9 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayServiceIT.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gateway; + +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.FailedShard; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) +public class GatewayServiceIT extends ESIntegTestCase { + + public static final Setting TEST_SETTING = Setting.boolSetting( + "gateway.test.setting", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final String ALLOCATOR_NAME = "test-shards-allocator"; + + public static class TestPlugin extends Plugin implements ClusterPlugin { + + private final AtomicBoolean settingApplied = new AtomicBoolean(); + + @Override + public List> getSettings() { + return List.of(TEST_SETTING); + } + + @Override + public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { + return List.of(new TestAllocationDecider(settings, clusterSettings, 
settingApplied)); + } + + @Override + public Map getExistingShardsAllocators() { + return Map.of(ALLOCATOR_NAME, new ExistingShardsAllocator() { + @Override + public void beforeAllocation(RoutingAllocation allocation) { + if (allocation.routingTable().iterator().hasNext()) { + // state is recovered so we must have applied the setting + assertTrue(settingApplied.get()); + } + } + + @Override + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {} + + @Override + public void allocateUnassigned( + ShardRouting shardRouting, + RoutingAllocation allocation, + UnassignedAllocationHandler unassignedAllocationHandler + ) { + + } + + @Override + public AllocateUnassignedDecision explainUnassignedShardAllocation( + ShardRouting unassignedShard, + RoutingAllocation routingAllocation + ) { + return AllocateUnassignedDecision.NOT_TAKEN; + } + + @Override + public void cleanCaches() {} + + @Override + public void applyStartedShards(List startedShards, RoutingAllocation allocation) {} + + @Override + public void applyFailedShards(List failedShards, RoutingAllocation allocation) {} + + @Override + public int getNumberOfInFlightFetches() { + return 0; + } + }); + } + } + + private static class TestAllocationDecider extends AllocationDecider { + TestAllocationDecider(Settings settings, ClusterSettings clusterSettings, AtomicBoolean settingApplied) { + if (TEST_SETTING.get(settings)) { + settingApplied.set(true); + } else { + clusterSettings.addSettingsUpdateConsumer(TEST_SETTING, b -> settingApplied.set(true)); + } + } + } + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), TestPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).put(TEST_SETTING.getKey(), true).build(); + } + + public void testSettingsAppliedBeforeReroute() throws Exception { + + assertAcked( + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(TEST_SETTING.getKey(), true)) + ); + + createIndex("test-index"); + + internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder().put(TEST_SETTING.getKey(), false).build(); + } + }); + + ensureGreen(); + + } + +} diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 307ce28cbb478..a79b8c8653094 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -20,8 +21,9 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; import 
org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; @@ -70,8 +72,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste private final ThreadPool threadPool; - private final AllocationService allocationService; - + private final RerouteService rerouteService; private final ClusterService clusterService; private final TimeValue recoverAfterTime; @@ -84,11 +85,11 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste @Inject public GatewayService( final Settings settings, - final AllocationService allocationService, + final RerouteService rerouteService, final ClusterService clusterService, final ThreadPool threadPool ) { - this.allocationService = allocationService; + this.rerouteService = rerouteService; this.clusterService = clusterService; this.threadPool = threadPool; this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(settings); @@ -220,12 +221,10 @@ public ClusterState execute(final ClusterState currentState) { return currentState; } - final ClusterState newState = Function.identity() + return Function.identity() .andThen(ClusterStateUpdaters::updateRoutingTable) .andThen(ClusterStateUpdaters::removeStateNotRecoveredBlock) .apply(currentState); - - return allocationService.reroute(newState, "state recovered"); } @Override @@ -233,7 +232,7 @@ public void clusterStateProcessed(final ClusterState oldState, final ClusterStat logger.info("recovered [{}] indices into cluster_state", newState.metadata().indices().size()); // reset flag even though state recovery completed, to ensure that if we subsequently become leader again based on a // not-recovered state, that we again do another state recovery. 
- resetRecoveredFlags(); + rerouteService.reroute("state recovered", Priority.NORMAL, ActionListener.wrap(GatewayService.this::resetRecoveredFlags)); } @Override diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java index 75b2b06cb80a1..e94d9e2135096 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java @@ -12,29 +12,18 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.EmptyClusterInfoService; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; -import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; -import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.snapshots.EmptySnapshotsInfoService; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.hamcrest.Matchers; -import java.util.Arrays; -import java.util.HashSet; - import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.elasticsearch.test.NodeRoles.masterNode; import static org.hamcrest.CoreMatchers.not; @@ -48,24 +37,7 @@ private GatewayService createService(final Settings.Builder settings) { new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null ); - final AllocationService allocationService = new AllocationService( - new AllocationDeciders( - new HashSet<>( - Arrays.asList( - new SameShardAllocationDecider( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) - ), - new ReplicaAfterPrimaryActiveAllocationDecider() - ) - ) - ), - new TestGatewayAllocator(), - new BalancedShardsAllocator(Settings.EMPTY), - EmptyClusterInfoService.INSTANCE, - EmptySnapshotsInfoService.INSTANCE - ); - return new GatewayService(settings.build(), allocationService, clusterService, null); + return new GatewayService(settings.build(), (reason, priority, listener) -> fail("should not reroute"), clusterService, null); } public void testDefaultRecoverAfterTime() { diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index 116b505a253c5..b08acc5f252ac 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -34,6 +34,7 @@ import 
org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.routing.BatchedRerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; @@ -1231,7 +1232,12 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { nodeHealthService ); masterService.setClusterStatePublisher(coordinator); - final GatewayService gatewayService = new GatewayService(settings, allocationService, clusterService, threadPool); + final GatewayService gatewayService = new GatewayService( + settings, + new BatchedRerouteService(clusterService, allocationService::reroute), + clusterService, + threadPool + ); logger.trace("starting up [{}]", localNode); transportService.start(); From d88d480100965859d148c15e55d9cdd38aa36266 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 26 Jan 2022 10:52:40 +0100 Subject: [PATCH 035/100] Mute GoogleCloudStorageServiceTests.testClientInitializer (#83132) --- .../repositories/gcs/GoogleCloudStorageServiceTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index 2d858dad2849b..f37dac8fb7db2 100644 --- a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -35,6 +35,7 @@ public class GoogleCloudStorageServiceTests extends ESTestCase { + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/83131") public void testClientInitializer() throws Exception { final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); From c77564214a77cbaf3616992743248b1db54b2909 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Wed, 26 Jan 2022 10:58:09 +0100 Subject: [PATCH 036/100] Fix flaky snapshot.restore/10_basic.yml YAML integration test (#83053) This test performs a snapshot and verifies that it has exactly 1 successful shard. This could fail when additional indices are included in the snapshot (such as the system .tasks index).
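For comparison, a rough Java-client sketch of the approach taken below: pinning the snapshot to the index under test keeps the shard counts deterministic even when hidden system indices exist in the cluster. The helper class and method here are illustrative assumptions, not part of the actual fix, and assume the admin-client snapshot API used elsewhere in this codebase.

import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.snapshots.SnapshotInfo;

class SingleIndexSnapshotExample {
    // Snapshots only the given index so that system indices such as .tasks
    // cannot inflate the successful-shard count asserted by the test.
    static SnapshotInfo snapshotSingleIndex(Client client, String repository, String snapshot, String index) {
        CreateSnapshotResponse response = client.admin()
            .cluster()
            .prepareCreateSnapshot(repository, snapshot)
            .setIndices(index)                // exclude everything but the index under test
            .setWaitForCompletion(true)
            .get();
        return response.getSnapshotInfo();    // successfulShards() is now deterministic
    }
}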
--- .../test/snapshot.restore/10_basic.yml | 55 +++++++++++++++++-- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.restore/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.restore/10_basic.yml index cba3613d9a0fc..1ea5b542625e8 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.restore/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.restore/10_basic.yml @@ -22,7 +22,7 @@ setup: wait_for_status: green --- -"Create a snapshot and then restore it": +"Create a snapshot with single index and then restore it": - skip: features: ["allowed_warnings"] @@ -30,10 +30,12 @@ setup: - do: snapshot.create: repository: test_repo_restore_1 - snapshot: test_snapshot + snapshot: test_snapshot_1 wait_for_completion: true + body: + indices: "test_index" - - match: { snapshot.snapshot: test_snapshot } + - match: { snapshot.snapshot: test_snapshot_1 } - match: { snapshot.state : SUCCESS } - match: { snapshot.shards.successful: 1 } - match: { snapshot.shards.failed : 0 } @@ -49,7 +51,7 @@ setup: - do: snapshot.restore: repository: test_repo_restore_1 - snapshot: test_snapshot + snapshot: test_snapshot_1 wait_for_completion: true - do: @@ -62,3 +64,48 @@ setup: - gt: { test_index.shards.0.index.size.recovered_in_bytes: 0} - match: { test_index.shards.0.index.files.reused: 0} - match: { test_index.shards.0.index.size.reused_in_bytes: 0} + +--- +"Create a snapshot and then restore single index from it": + + - skip: + features: ["allowed_warnings"] + + - do: + snapshot.create: + repository: test_repo_restore_1 + snapshot: test_snapshot_2 + wait_for_completion: true + + - match: { snapshot.snapshot: test_snapshot_2 } + - match: { snapshot.state : SUCCESS } + # snapshot can contain system indices and have shards.successful >=1 + - gt: { snapshot.shards.successful: 0 } + - match: { snapshot.shards.failed : 0 } + - is_true: snapshot.version + - gt: { snapshot.version_id: 0} + + - do: + indices.close: + index : test_index + allowed_warnings: + - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" + + - do: + snapshot.restore: + repository: test_repo_restore_1 + snapshot: test_snapshot_2 + wait_for_completion: true + body: + indices: "test_index" + + - do: + indices.recovery: + index: test_index + + - match: { test_index.shards.0.type: SNAPSHOT } + - match: { test_index.shards.0.stage: DONE } + - match: { test_index.shards.0.index.files.recovered: 1} + - gt: { test_index.shards.0.index.size.recovered_in_bytes: 0} + - match: { test_index.shards.0.index.files.reused: 0} + - match: { test_index.shards.0.index.size.reused_in_bytes: 0} From 788df35e339d5f1c90592d5e12c929b3f959bc78 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 26 Jan 2022 12:07:22 +0100 Subject: [PATCH 037/100] Make AllocationService#adaptAutoExpandReplicas Faster (#83092) This method is getting fairly expensive for large cluster states. In most cases it is not necessary to actually compute the `RoutingAllocation`, so I made that computation lazy to avoid needlessly building the routing nodes. Also, parsing the auto-expand-replicas setting gets quite expensive when looping over thousands of shards in this method, so I moved the parsed auto-expand setting value into the index metadata. These changes make the method disappear from profiling in most cases and make reroute a bit faster still.
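The core trick in this patch, distilled: the expensive value is hidden behind a Supplier and only materialized if at least one index actually needs it, and then at most once. A generic, self-contained sketch of that shape follows; all names in it are illustrative, with a String standing in for the expensive RoutingAllocation.

import java.util.List;
import java.util.function.Supplier;

class LazyAllocationExample {

    record Index(String name, boolean autoExpandEnabled) {}

    static void applyChanges(List<Index> indices, Supplier<String> expensiveSupplier) {
        String allocation = null; // not computed yet
        for (Index index : indices) {
            if (index.autoExpandEnabled() == false) {
                continue; // fast path: the supplier is never invoked
            }
            if (allocation == null) {
                allocation = expensiveSupplier.get(); // computed lazily, at most once
            }
            System.out.println("updating " + index.name() + " using " + allocation);
        }
    }

    public static void main(String[] args) {
        applyChanges(
            List.of(new Index("logs", false), new Index("metrics", true)),
            () -> {
                System.out.println("building expensive stand-in for RoutingAllocation");
                return "allocation";
            }
        );
    }
}

This mirrors AutoExpandReplicas.getAutoExpandReplicaChanges in the diff below, which now takes a Supplier<RoutingAllocation> and skips per-index setting parsing by reading the pre-parsed value cached on IndexMetadata.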
Also, parsing the auto-expand-replicas setting gets quite expensive when looping over thousands of shards in this method so I moved the auto-expand setting value into the index metadata. These changes make the method disappear from profiling in most cases and help make reroute yet a bit faster. --- docs/changelog/83092.yaml | 5 ++ ...ansportClusterAllocationExplainAction.java | 3 -- .../cluster/metadata/AutoExpandReplicas.java | 54 +++++++++++-------- .../cluster/metadata/IndexMetadata.java | 16 ++++-- .../routing/allocation/AllocationService.java | 12 ++--- .../routing/allocation/RoutingAllocation.java | 54 ++++++++++--------- .../decider/AwarenessAllocationDecider.java | 3 +- .../decider/SameShardAllocationDecider.java | 5 +- .../ClusterAllocationExplainActionTests.java | 3 +- .../allocation/AllocationServiceTests.java | 1 - .../allocation/AwarenessAllocationTests.java | 1 - .../MaxRetryAllocationDeciderTests.java | 8 +-- .../NodeVersionAllocationDeciderTests.java | 2 +- .../ResizeAllocationDeciderTests.java | 8 +-- .../allocation/SameShardRoutingTests.java | 1 - .../decider/AllocationDecidersTests.java | 11 +--- .../DiskThresholdDeciderUnitTests.java | 18 ++----- .../decider/FilterAllocationDeciderTests.java | 4 +- ...NodeReplacementAllocationDeciderTests.java | 8 +-- .../NodeShutdownAllocationDeciderTests.java | 12 ++--- ...storeInProgressAllocationDeciderTests.java | 1 - .../ReactiveStorageDeciderService.java | 18 +------ .../ReactiveStorageDeciderServiceTests.java | 18 +------ .../xpack/core/ilm/AllocationRoutedStep.java | 9 +--- .../core/ilm/SetSingleNodeAllocateStep.java | 6 +-- .../DataTierAllocationDeciderTests.java | 4 +- .../TransportGetShutdownStatusAction.java | 1 - 27 files changed, 123 insertions(+), 163 deletions(-) create mode 100644 docs/changelog/83092.yaml diff --git a/docs/changelog/83092.yaml b/docs/changelog/83092.yaml new file mode 100644 index 0000000000000..ebe3f8c9d84d1 --- /dev/null +++ b/docs/changelog/83092.yaml @@ -0,0 +1,5 @@ +pr: 83092 +summary: Make `AllocationService#adaptAutoExpandReplicas` Faster +area: Allocation +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 4089f6ce3703c..09ce5606b0a74 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -20,7 +20,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -92,11 +91,9 @@ protected void masterOperation( final ClusterState state, final ActionListener listener ) { - final RoutingNodes routingNodes = state.getRoutingNodes(); final ClusterInfo clusterInfo = clusterInfoService.getClusterInfo(); final RoutingAllocation allocation = new RoutingAllocation( allocationDeciders, - routingNodes, state, clusterInfo, snapshotsInfoService.snapshotShardSizes(), diff --git 
a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java index 5e37bd62a2dc4..6c76fb41c5270 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java @@ -19,6 +19,7 @@ import java.util.List; import java.util.Map; import java.util.OptionalInt; +import java.util.function.Supplier; import static org.elasticsearch.cluster.metadata.MetadataIndexStateService.isIndexVerifiedBeforeClosed; @@ -100,28 +101,27 @@ public boolean expandToAllNodes() { return maxReplicas == Integer.MAX_VALUE; } - private OptionalInt getDesiredNumberOfReplicas(IndexMetadata indexMetadata, RoutingAllocation allocation) { - if (enabled) { - int numMatchingDataNodes = 0; - for (DiscoveryNode discoveryNode : allocation.nodes().getDataNodes().values()) { - Decision decision = allocation.deciders().shouldAutoExpandToNode(indexMetadata, discoveryNode, allocation); - if (decision.type() != Decision.Type.NO) { - numMatchingDataNodes++; - } + public OptionalInt getDesiredNumberOfReplicas(IndexMetadata indexMetadata, RoutingAllocation allocation) { + assert enabled : "should only be called when enabled"; + int numMatchingDataNodes = 0; + for (DiscoveryNode discoveryNode : allocation.nodes().getDataNodes().values()) { + Decision decision = allocation.deciders().shouldAutoExpandToNode(indexMetadata, discoveryNode, allocation); + if (decision.type() != Decision.Type.NO) { + numMatchingDataNodes++; } + } - final int min = minReplicas(); - final int max = getMaxReplicas(numMatchingDataNodes); - int numberOfReplicas = numMatchingDataNodes - 1; - if (numberOfReplicas < min) { - numberOfReplicas = min; - } else if (numberOfReplicas > max) { - numberOfReplicas = max; - } + final int min = minReplicas(); + final int max = getMaxReplicas(numMatchingDataNodes); + int numberOfReplicas = numMatchingDataNodes - 1; + if (numberOfReplicas < min) { + numberOfReplicas = min; + } else if (numberOfReplicas > max) { + numberOfReplicas = max; + } - if (numberOfReplicas >= min && numberOfReplicas <= max) { - return OptionalInt.of(numberOfReplicas); - } + if (numberOfReplicas >= min && numberOfReplicas <= max) { + return OptionalInt.of(numberOfReplicas); } return OptionalInt.empty(); } @@ -137,12 +137,22 @@ public String toString() { * The map has the desired number of replicas as key and the indices to update as value, as this allows the result * of this method to be directly applied to RoutingTable.Builder#updateNumberOfReplicas. 
*/ - public static Map> getAutoExpandReplicaChanges(Metadata metadata, RoutingAllocation allocation) { + public static Map> getAutoExpandReplicaChanges( + Metadata metadata, + Supplier allocationSupplier + ) { Map> nrReplicasChanged = new HashMap<>(); - + // RoutingAllocation is fairly expensive to compute, only lazy create it via the supplier if we actually need it + RoutingAllocation allocation = null; for (final IndexMetadata indexMetadata : metadata) { if (indexMetadata.getState() == IndexMetadata.State.OPEN || isIndexVerifiedBeforeClosed(indexMetadata)) { - AutoExpandReplicas autoExpandReplicas = SETTING.get(indexMetadata.getSettings()); + AutoExpandReplicas autoExpandReplicas = indexMetadata.getAutoExpandReplicas(); + if (autoExpandReplicas.enabled() == false) { + continue; + } + if (allocation == null) { + allocation = allocationSupplier.get(); + } autoExpandReplicas.getDesiredNumberOfReplicas(indexMetadata, allocation).ifPresent(numberOfReplicas -> { if (numberOfReplicas != indexMetadata.getNumberOfReplicas()) { nrReplicasChanged.computeIfAbsent(numberOfReplicas, ArrayList::new).add(indexMetadata.getIndex().getName()); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 10ff36a5b855f..ff8da8ddd0df2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -503,6 +503,8 @@ public static APIBlock readFrom(StreamInput input) throws IOException { private final LifecycleExecutionState lifecycleExecutionState; + private final AutoExpandReplicas autoExpandReplicas; + private IndexMetadata( final Index index, final long version, @@ -536,7 +538,8 @@ private IndexMetadata( final boolean ignoreDiskWatermarks, @Nullable final List tierPreference, final int shardsPerNodeLimit, - final LifecycleExecutionState lifecycleExecutionState + final LifecycleExecutionState lifecycleExecutionState, + final AutoExpandReplicas autoExpandReplicas ) { this.index = index; this.version = version; @@ -578,6 +581,7 @@ private IndexMetadata( this.tierPreference = tierPreference; this.shardsPerNodeLimit = shardsPerNodeLimit; this.lifecycleExecutionState = lifecycleExecutionState; + this.autoExpandReplicas = autoExpandReplicas; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -618,7 +622,8 @@ IndexMetadata withMappingMetadata(MappingMetadata mapping) { this.ignoreDiskWatermarks, this.tierPreference, this.shardsPerNodeLimit, - this.lifecycleExecutionState + this.lifecycleExecutionState, + this.autoExpandReplicas ); } @@ -746,6 +751,10 @@ public LifecycleExecutionState getLifecycleExecutionState() { return lifecycleExecutionState; } + public AutoExpandReplicas getAutoExpandReplicas() { + return autoExpandReplicas; + } + /** * Return the concrete mapping for this index or {@code null} if this index has no mappings at all. 
*/ @@ -1612,7 +1621,8 @@ public IndexMetadata build() { DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS.get(settings), tierPreference, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(settings), - lifecycleExecutionState + lifecycleExecutionState, + AutoExpandReplicas.SETTING.get(settings) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 420168b5c670e..859621247e92c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -51,6 +51,7 @@ import java.util.Optional; import java.util.Set; import java.util.function.Function; +import java.util.function.Supplier; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -71,7 +72,7 @@ public class AllocationService { private Map<String, ExistingShardsAllocator> existingShardsAllocators; private final ShardsAllocator shardsAllocator; private final ClusterInfoService clusterInfoService; - private SnapshotsInfoService snapshotsInfoService; + private final SnapshotsInfoService snapshotsInfoService; // only for tests that use the GatewayAllocator as the unique ExistingShardsAllocator public AllocationService( @@ -298,9 +299,8 @@ public ClusterState disassociateDeadNodes(ClusterState clusterState, boolean rer * Returns an updated cluster state if changes were necessary, or the identical cluster if no changes were required. */ public ClusterState adaptAutoExpandReplicas(ClusterState clusterState) { - RoutingAllocation allocation = new RoutingAllocation( + final Supplier<RoutingAllocation> allocationSupplier = () -> new RoutingAllocation( allocationDeciders, - clusterState.getRoutingNodes(), clusterState, clusterInfoService.getClusterInfo(), snapshotsInfoService.snapshotShardSizes(), @@ -308,7 +308,7 @@ public ClusterState adaptAutoExpandReplicas(ClusterState clusterState) { ); final Map<Integer, List<String>> autoExpandReplicaChanges = AutoExpandReplicas.getAutoExpandReplicaChanges( clusterState.metadata(), - allocation + allocationSupplier ); if (autoExpandReplicaChanges.isEmpty()) { return clusterState; @@ -336,7 +336,7 @@ public ClusterState adaptAutoExpandReplicas(ClusterState clusterState) { .routingTable(routingTableBuilder.build()) .metadata(metadataBuilder) .build(); - assert AutoExpandReplicas.getAutoExpandReplicaChanges(fixedState.metadata(), allocation).isEmpty(); + assert AutoExpandReplicas.getAutoExpandReplicaChanges(fixedState.metadata(), allocationSupplier).isEmpty(); return fixedState; } } @@ -514,7 +514,7 @@ private boolean hasDeadNodes(RoutingAllocation allocation) { private void reroute(RoutingAllocation allocation) { assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up.
See disassociateDeadNodes"; - assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), allocation).isEmpty() + assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), () -> allocation).isEmpty() : "auto-expand replicas out of sync with number of nodes in the cluster"; assert assertInitialized(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index 84486f774d4df..d9827add4a2f1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.RestoreService.RestoreInProgressUpdater; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; @@ -42,15 +43,10 @@ public class RoutingAllocation { private final AllocationDeciders deciders; + @Nullable private final RoutingNodes routingNodes; - private final Metadata metadata; - - private final RoutingTable routingTable; - - private final DiscoveryNodes nodes; - - private final ImmutableOpenMap customs; + private final ClusterState clusterState; private final ClusterInfo clusterInfo; @@ -75,19 +71,28 @@ public class RoutingAllocation { restoreInProgressUpdater ); - private final Map nodeShutdowns; private final Map nodeReplacementTargets; + public RoutingAllocation( + AllocationDeciders deciders, + ClusterState clusterState, + ClusterInfo clusterInfo, + SnapshotShardSizeInfo shardSizeInfo, + long currentNanoTime + ) { + this(deciders, null, clusterState, clusterInfo, shardSizeInfo, currentNanoTime); + } + /** * Creates a new {@link RoutingAllocation} - * @param deciders {@link AllocationDeciders} to used to make decisions for routing allocations - * @param routingNodes Routing nodes in the current cluster + * @param deciders {@link AllocationDeciders} to used to make decisions for routing allocations + * @param routingNodes Routing nodes in the current cluster or {@code null} if using those in the given cluster state * @param clusterState cluster state before rerouting * @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()}) */ public RoutingAllocation( AllocationDeciders deciders, - RoutingNodes routingNodes, + @Nullable RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, SnapshotShardSizeInfo shardSizeInfo, @@ -95,16 +100,12 @@ public RoutingAllocation( ) { this.deciders = deciders; this.routingNodes = routingNodes; - this.metadata = clusterState.metadata(); - this.routingTable = clusterState.routingTable(); - this.nodes = clusterState.nodes(); - this.customs = clusterState.customs(); + this.clusterState = clusterState; this.clusterInfo = clusterInfo; this.shardSizeInfo = shardSizeInfo; this.currentNanoTime = currentNanoTime; - this.nodeShutdowns = metadata.nodeShutdowns(); Map targetNameToShutdown = new HashMap<>(); - for (SingleNodeShutdownMetadata shutdown : this.nodeShutdowns.values()) { + for (SingleNodeShutdownMetadata shutdown : clusterState.metadata().nodeShutdowns().values()) { if 
(shutdown.getType() == SingleNodeShutdownMetadata.Type.REPLACE) { targetNameToShutdown.put(shutdown.getTargetNodeName(), shutdown); } @@ -130,7 +131,7 @@ public AllocationDeciders deciders() { * @return current routing table */ public RoutingTable routingTable() { - return routingTable; + return clusterState.routingTable(); } /** @@ -138,7 +139,10 @@ public RoutingTable routingTable() { * @return routing nodes */ public RoutingNodes routingNodes() { - return routingNodes; + if (routingNodes != null) { + return routingNodes; + } + return clusterState.getRoutingNodes(); } /** @@ -146,7 +150,7 @@ public RoutingNodes routingNodes() { * @return Metadata of routing nodes */ public Metadata metadata() { - return metadata; + return clusterState.metadata(); } /** @@ -154,7 +158,7 @@ public Metadata metadata() { * @return discovery nodes */ public DiscoveryNodes nodes() { - return nodes; + return clusterState.nodes(); } public ClusterInfo clusterInfo() { @@ -169,7 +173,7 @@ public SnapshotShardSizeInfo snapshotShardSizeInfo() { * Returns the map of node id to shutdown metadata currently in the cluster */ public Map<String, SingleNodeShutdownMetadata> nodeShutdowns() { - return this.nodeShutdowns; + return metadata().nodeShutdowns(); } /** @@ -181,11 +185,11 @@ public Map<String, SingleNodeShutdownMetadata> replacementTargetShutdowns() { @SuppressWarnings("unchecked") public <T extends ClusterState.Custom> T custom(String key) { - return (T) customs.get(key); + return (T) clusterState.customs().get(key); } public ImmutableOpenMap<String, ClusterState.Custom> getCustoms() { - return customs; + return clusterState.getCustoms(); } public void ignoreDisable(boolean ignoreDisable) { @@ -267,7 +271,7 @@ public RoutingChangesObserver changes() { * Returns updated {@link Metadata} based on the changes that were made to the routing nodes */ public Metadata updateMetadataWithRoutingChanges(RoutingTable newRoutingTable) { - return indexMetadataUpdater.applyChanges(metadata, newRoutingTable); + return indexMetadataUpdater.applyChanges(metadata(), newRoutingTable); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 99e6093de02ee..5f981123e2c53 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -29,7 +29,6 @@ import static java.util.Collections.emptyList; import static java.util.stream.Collectors.toList; -import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING; /** * This {@link AllocationDecider} controls shard allocation based on @@ -164,7 +163,7 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout final boolean debug = allocation.debugDecision(); final IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); - if (INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(indexMetadata.getSettings()).expandToAllNodes()) { + if (indexMetadata.getAutoExpandReplicas().expandToAllNodes()) { return YES_AUTO_EXPAND_ALL; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java index 10682cf1912ce..2ee5c3812d7fa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java @@ -18,8 +18,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING; - /** * An allocation decider that prevents multiple instances of the same shard to * be allocated on the same {@code node}. @@ -82,8 +80,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing // if its already a NO decision looking at the node, or we aren't configured to look at the host, return the decision return decision; } - if (INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(allocation.metadata().getIndexSafe(shardRouting.index()).getSettings()) - .expandToAllNodes()) { + if (allocation.metadata().getIndexSafe(shardRouting.index()).getAutoExpandReplicas().expandToAllNodes()) { return YES_AUTO_EXPAND_ALL; } if (node.node() != null) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java index 13ce8462f1fb5..3f86bada8a579 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java @@ -52,7 +52,6 @@ public void testInitializingOrRelocatingShardExplanation() throws Exception { ShardRouting shard = clusterState.getRoutingTable().index("idx").shard(0).primaryShard(); RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.emptyList()), - clusterState.getRoutingNodes(), clusterState, null, null, @@ -268,6 +267,6 @@ public void testFindShardAssignedToNode() { } private static RoutingAllocation routingAllocation(ClusterState clusterState) { - return new RoutingAllocation(NOOP_DECIDERS, clusterState.getRoutingNodes(), clusterState, null, null, System.nanoTime()); + return new RoutingAllocation(NOOP_DECIDERS, clusterState, null, null, System.nanoTime()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java index 51a83612c33c9..d117d34bc95a4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java @@ -260,7 +260,6 @@ public void testExplainsNonAllocationOfShardWithUnknownAllocator() { final RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.emptyList()), - clusterState.getRoutingNodes(), clusterState, ClusterInfo.EMPTY, null, diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index ba5e8d292509f..9e725a398445a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -1105,7 +1105,6 @@ private void testExplanation( final RoutingAllocation routingAllocation = new RoutingAllocation( new 
AllocationDeciders(singletonList(decider)), - clusterState.getRoutingNodes(), clusterState, null, null, diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index f8dde00ac4597..41710a7ba7fda 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -202,7 +202,7 @@ public void testFailedAllocation() { new MaxRetryAllocationDecider().canForceAllocatePrimary( unassignedPrimary, null, - new RoutingAllocation(null, null, clusterState, null, null, 0) + new RoutingAllocation(null, clusterState, null, null, 0) ) ); } @@ -231,7 +231,7 @@ public void testFailedAllocation() { new MaxRetryAllocationDecider().canForceAllocatePrimary( unassignedPrimary, null, - new RoutingAllocation(null, null, clusterState, null, null, 0) + new RoutingAllocation(null, clusterState, null, null, 0) ) ); } @@ -271,7 +271,7 @@ public void testFailedAllocation() { new MaxRetryAllocationDecider().canForceAllocatePrimary( routingTable.index("idx").shard(0).shards().get(0), null, - new RoutingAllocation(null, null, clusterState, null, null, 0) + new RoutingAllocation(null, clusterState, null, null, 0) ) ); @@ -308,7 +308,7 @@ public void testFailedAllocation() { new MaxRetryAllocationDecider().canForceAllocatePrimary( unassignedPrimary, null, - new RoutingAllocation(null, null, clusterState, null, null, 0) + new RoutingAllocation(null, clusterState, null, null, 0) ) ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 36755b74b6d81..113beb2f90bd6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -594,7 +594,7 @@ public void testMessages() { final ShardRouting primaryShard = clusterState.routingTable().shardRoutingTable(shardId).primaryShard(); final ShardRouting replicaShard = clusterState.routingTable().shardRoutingTable(shardId).replicaShards().get(0); - RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState, null, null, 0); routingAllocation.debugDecision(true); final NodeVersionAllocationDecider allocationDecider = new NodeVersionAllocationDecider(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java index 1c8bf5643219c..d47c6952db424 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java @@ -105,7 +105,7 @@ private ClusterState createInitialClusterState(boolean startShards) { public void testNonResizeRouting() { ClusterState clusterState = createInitialClusterState(true); ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(); - 
RoutingAllocation routingAllocation = new RoutingAllocation(null, null, clusterState, null, null, 0); + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState, null, null, 0); ShardRouting shardRouting = TestShardRouting.newShardRouting("non-resize", 0, null, true, ShardRoutingState.UNASSIGNED); assertEquals(Decision.ALWAYS, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); assertEquals( @@ -134,7 +134,7 @@ public void testShrink() { // we don't handle shrink yet Index idx = clusterState.metadata().index("target").getIndex(); ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(); - RoutingAllocation routingAllocation = new RoutingAllocation(null, null, clusterState, null, null, 0); + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState, null, null, 0); ShardRouting shardRouting = TestShardRouting.newShardRouting( new ShardId(idx, 0), null, @@ -173,7 +173,7 @@ public void testSourceNotActive() { Index idx = clusterState.metadata().index("target").getIndex(); ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(); - RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState, null, null, 0); int shardId = randomIntBetween(0, 3); int sourceShardId = IndexMetadata.selectSplitShard(shardId, clusterState.metadata().index("source"), 4).id(); ShardRouting shardRouting = TestShardRouting.newShardRouting( @@ -230,7 +230,7 @@ public void testSourcePrimaryActive() { Index idx = clusterState.metadata().index("target").getIndex(); ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(); - RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState, null, null, 0); int shardId = randomIntBetween(0, 3); int sourceShardId = IndexMetadata.selectSplitShard(shardId, clusterState.metadata().index("source"), 4).id(); ShardRouting shardRouting = TestShardRouting.newShardRouting( diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java index de95b2f9d2c9d..419c1729d3329 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java @@ -217,7 +217,6 @@ public void testSameHostCheckWithExplain() { final RoutingAllocation routingAllocation = new RoutingAllocation( new AllocationDeciders(singletonList(decider)), - clusterState.getRoutingNodes(), clusterState, null, null, diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java index d310947434811..1c071a21605bc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java @@ -79,7 +79,7 @@ public Decision canRebalance(RoutingAllocation allocation) { })); ClusterState clusterState = ClusterState.builder(new 
ClusterName("test")).build(); - final RoutingAllocation allocation = new RoutingAllocation(deciders, clusterState.getRoutingNodes(), clusterState, null, null, 0L); + final RoutingAllocation allocation = new RoutingAllocation(deciders, clusterState, null, null, 0L); allocation.setDebugMode(mode); final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "_message"); @@ -217,14 +217,7 @@ private Decision decision(RoutingAllocation allocation) { .numberOfReplicas(0) .build(); - final RoutingAllocation allocation = new RoutingAllocation( - allocationDeciders, - clusterState.getRoutingNodes(), - clusterState, - null, - null, - 0L - ); + final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, clusterState, null, null, 0L); assertSame(Decision.NO, allocationDeciders.canAllocate(shardRouting, routingNode, allocation)); assertSame(Decision.NO, allocationDeciders.canRebalance(shardRouting, allocation)); assertSame(Decision.NO, allocationDeciders.canRemain(shardRouting, routingNode, allocation)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 7589c66f9f99b..6aad36b22482a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -113,7 +113,6 @@ public void testCanAllocateUsesMaxAvailableSpace() { ); RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.singleton(decider)), - clusterState.getRoutingNodes(), clusterState, clusterInfo, null, @@ -196,7 +195,6 @@ public void testCannotAllocateDueToLackOfDiskResources() { ); RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.singleton(decider)), - clusterState.getRoutingNodes(), clusterState, clusterInfo, null, @@ -319,7 +317,6 @@ public void testCanRemainUsesLeastAvailableSpace() { ); RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.singleton(decider)), - clusterState.getRoutingNodes(), clusterState, clusterInfo, null, @@ -397,7 +394,7 @@ public void testShardSizeAndRelocatingSize() { ClusterState clusterState = ClusterState.builder( org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY) ).metadata(metadata).routingTable(routingTableBuilder.build()).build(); - RoutingAllocation allocation = new RoutingAllocation(null, null, clusterState, info, null, 0); + RoutingAllocation allocation = new RoutingAllocation(null, clusterState, info, null, 0); final Index index = new Index("test", "1234"); ShardRouting test_0 = ShardRouting.newUnassigned( @@ -547,7 +544,7 @@ public void testSizeShrinkIndex() { clusterState.getRoutingTable().index("test").shardsWithState(ShardRoutingState.UNASSIGNED) ); - RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, info, null, 0); + RoutingAllocation allocation = new RoutingAllocation(null, clusterState, info, null, 0); final Index index = new Index("test", "1234"); ShardRouting test_0 = ShardRouting.newUnassigned( @@ -619,14 +616,7 @@ public void testSizeShrinkIndex() { .build(); allocationService.reroute(clusterState, "foo"); - RoutingAllocation allocationWithMissingSourceIndex = new RoutingAllocation( - null, - 
clusterStateWithMissingSourceIndex.getRoutingNodes(), - clusterStateWithMissingSourceIndex, - info, - null, - 0 - ); + RoutingAllocation allocationWithMissingSourceIndex = new RoutingAllocation(null, clusterStateWithMissingSourceIndex, info, null, 0); assertEquals(42L, getExpectedShardSize(target, 42L, allocationWithMissingSourceIndex)); assertEquals(42L, getExpectedShardSize(target2, 42L, allocationWithMissingSourceIndex)); } @@ -768,7 +758,6 @@ public void testDecidesYesIfWatermarksIgnored() { ); RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.singleton(decider)), - clusterState.getRoutingNodes(), clusterState, clusterInfo, null, @@ -845,7 +834,6 @@ public void testCannotForceAllocateOver100PercentUsage() { ); RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.singleton(decider)), - clusterState.getRoutingNodes(), clusterState, clusterInfo, null, diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index 81acdbc8d2168..4dec7b24e4645 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -72,7 +72,7 @@ public void testFilterInitialRecovery() { assertNull(routingTable.index("idx").shard(0).shards().get(0).currentNodeId()); // after failing the shard we are unassigned since the node is blacklisted and we can't initialize on the other node - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); allocation.debugDecision(true); Decision.Single decision = (Decision.Single) filterAllocationDecider.canAllocate( routingTable.index("idx").shard(0).primaryShard(), @@ -130,7 +130,7 @@ public void testFilterInitialRecovery() { assertEquals(routingTable.index("idx").shard(0).primaryShard().state(), INITIALIZING); assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node1"); - allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); allocation.debugDecision(true); decision = (Decision.Single) filterAllocationDecider.canAllocate( routingTable.index("idx").shard(0).shards().get(0), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java index bd22bfe215ff4..1d37e0f9c3572 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java @@ -85,7 +85,7 @@ public void testNoReplacements() { .nodes(DiscoveryNodes.builder().add(NODE_A).add(NODE_B).add(NODE_C).build()) .build(); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 
0); DiscoveryNode node = randomFrom(NODE_A, NODE_B, NODE_C); RoutingNode routingNode = new RoutingNode(node.getId(), node, shard); allocation.debugDecision(true); @@ -101,7 +101,7 @@ public void testNoReplacements() { public void testCanForceAllocate() { ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"), NODE_A.getId(), NODE_B.getName()); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = new RoutingNode(NODE_A.getId(), NODE_A, shard); allocation.debugDecision(true); @@ -144,7 +144,7 @@ public void testCanForceAllocate() { public void testCannotRemainOnReplacedNode() { ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"), NODE_A.getId(), NODE_B.getName()); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = new RoutingNode(NODE_A.getId(), NODE_A, shard); allocation.debugDecision(true); @@ -170,7 +170,7 @@ public void testCannotRemainOnReplacedNode() { public void testCanAllocateToNeitherSourceNorTarget() { ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"), NODE_A.getId(), NODE_B.getName()); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = new RoutingNode(NODE_A.getId(), NODE_A, shard); allocation.debugDecision(true); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java index 7803bbc8e4279..6c85fe062dfd4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java @@ -81,7 +81,7 @@ public void testCanAllocateShardsToRestartingNode() { service.reroute(ClusterState.EMPTY_STATE, "initial state"), SingleNodeShutdownMetadata.Type.RESTART ); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = new RoutingNode(DATA_NODE.getId(), DATA_NODE, shard); allocation.debugDecision(true); @@ -98,7 +98,7 @@ public void testCannotAllocateShardsToRemovingNode() { service.reroute(ClusterState.EMPTY_STATE, "initial state"), randomFrom(SingleNodeShutdownMetadata.Type.REMOVE, SingleNodeShutdownMetadata.Type.REPLACE) ); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = new RoutingNode(DATA_NODE.getId(), DATA_NODE, shard); allocation.debugDecision(true); @@ -112,7 +112,7 @@ public void testShardsCanRemainOnRestartingNode() { 
service.reroute(ClusterState.EMPTY_STATE, "initial state"), SingleNodeShutdownMetadata.Type.RESTART ); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = new RoutingNode(DATA_NODE.getId(), DATA_NODE, shard); allocation.debugDecision(true); @@ -129,7 +129,7 @@ public void testShardsCannotRemainOnRemovingNode() { service.reroute(ClusterState.EMPTY_STATE, "initial state"), randomFrom(SingleNodeShutdownMetadata.Type.REMOVE, SingleNodeShutdownMetadata.Type.REPLACE) ); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = new RoutingNode(DATA_NODE.getId(), DATA_NODE, shard); allocation.debugDecision(true); @@ -143,7 +143,7 @@ public void testCannotAutoExpandToRestartingNode() { service.reroute(ClusterState.EMPTY_STATE, "initial state"), SingleNodeShutdownMetadata.Type.RESTART ); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); allocation.debugDecision(true); Decision decision = decider.shouldAutoExpandToNode(indexMetadata, DATA_NODE, allocation); @@ -159,7 +159,7 @@ public void testCannotAutoExpandToRemovingNode() { service.reroute(ClusterState.EMPTY_STATE, "initial state"), randomFrom(SingleNodeShutdownMetadata.Type.REMOVE, SingleNodeShutdownMetadata.Type.REPLACE) ); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); allocation.debugDecision(true); Decision decision = decider.shouldAutoExpandToNode(indexMetadata, DATA_NODE, allocation); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java index 7d5458118df7a..5b8f80101163c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java @@ -202,7 +202,6 @@ private Decision executeAllocation(final ClusterState clusterState, final ShardR final AllocationDecider decider = new RestoreInProgressAllocationDecider(); final RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.singleton(decider)), - clusterState.getRoutingNodes(), clusterState, null, null, diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index 03a694511787e..cf4faaee98d30 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -227,14 +227,7 @@ 
public static class AllocationState { } public long storagePreventsAllocation() { - RoutingAllocation allocation = new RoutingAllocation( - allocationDeciders, - state.getRoutingNodes(), - state, - info, - shardSizeInfo, - System.nanoTime() - ); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, info, shardSizeInfo, System.nanoTime()); return StreamSupport.stream(state.getRoutingNodes().unassigned().spliterator(), false) .filter(shard -> canAllocate(shard, allocation) == false) .filter(shard -> cannotAllocateDueToStorage(shard, allocation)) @@ -243,14 +236,7 @@ public long storagePreventsAllocation() { } public long storagePreventsRemainOrMove() { - RoutingAllocation allocation = new RoutingAllocation( - allocationDeciders, - state.getRoutingNodes(), - state, - info, - shardSizeInfo, - System.nanoTime() - ); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, info, shardSizeInfo, System.nanoTime()); List<ShardRouting> candidates = new LinkedList<>(); for (RoutingNode routingNode : state.getRoutingNodes()) { diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderServiceTests.java index 6cc2501e3079e..c644cbd656708 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderServiceTests.java @@ -534,14 +534,7 @@ public boolean canRemainWithNoNodes(ClusterState clusterState, ShardRouting shar Set.of(DiscoveryNodeRole.DATA_WARM_NODE_ROLE) ); - RoutingAllocation allocation = new RoutingAllocation( - allocationDeciders, - clusterState.getRoutingNodes(), - clusterState, - null, - null, - randomLong() - ); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, clusterState, null, null, randomLong()); return allocationState.canRemainOnlyHighestTierPreference(shardRouting, allocation); } @@ -646,14 +639,7 @@ private void verifyNeedsWarmTier( Set.of(DiscoveryNodeRole.DATA_WARM_NODE_ROLE) ); - RoutingAllocation allocation = new RoutingAllocation( - allocationDeciders, - clusterState.getRoutingNodes(), - clusterState, - null, - null, - randomLong() - ); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, clusterState, null, null, randomLong()); assertThat(allocationState.needsThisTier(shardRouting, allocation), is(expected)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java index 6d099589a03a8..16362c36f8102 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java @@ -90,14 +90,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { static int getPendingAllocations(Index index, AllocationDeciders allocationDeciders, ClusterState clusterState) { // All the allocation attributes are already set so just need to check // if the allocation has happened - RoutingAllocation allocation = new RoutingAllocation( - allocationDeciders, - clusterState.getRoutingNodes(), - clusterState, - null, - null, - System.nanoTime() - ); + 
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, clusterState, null, null, System.nanoTime()); int allocationPendingAllShards = 0; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java index 8d3b6a5324b43..0bfc3f92d4d4b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; @@ -80,8 +79,7 @@ public void performAction( new NodeReplacementAllocationDecider() ) ); - final RoutingNodes routingNodes = clusterState.getRoutingNodes(); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, null, null, System.nanoTime()); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, clusterState, null, null, System.nanoTime()); List<String> validNodeIds = new ArrayList<>(); String indexName = indexMetadata.getIndex().getName(); final Map<ShardId, List<ShardRouting>> routingsByShardId = clusterState.getRoutingTable() @@ -90,7 +88,7 @@ .collect(Collectors.groupingBy(ShardRouting::shardId)); if (routingsByShardId.isEmpty() == false) { - for (RoutingNode node : routingNodes) { + for (RoutingNode node : allocation.routingNodes()) { boolean canAllocateOneCopyOfEachShard = routingsByShardId.values() .stream() // For each shard .allMatch( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java index b7dea77029521..2777aeb076897 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java @@ -106,7 +106,7 @@ public void testIndexPrefer() { .build() ) .build(); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); allocation.debugDecision(true); Decision d; RoutingNode node; @@ -154,7 +154,7 @@ public void testIndexPrefer() { .build() ) .build(); - allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); allocation.debugDecision(true); for (DiscoveryNode n : Arrays.asList(HOT_NODE, WARM_NODE)) { diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java index 01fba0a336a63..441f967410f87 100644 --- 
a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java @@ -217,7 +217,6 @@ static ShutdownShardMigrationStatus shardMigrationStatus( // If there's no relocating shards and shards still on this node, we need to figure out why final RoutingAllocation allocation = new RoutingAllocation( allocationDeciders, - currentState.getRoutingNodes(), currentState, clusterInfoService.getClusterInfo(), snapshotsInfoService.snapshotShardSizes(), From 9633883c64cb55fc78d60ee12ea2c6f953c2e5f4 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 26 Jan 2022 11:16:43 +0000 Subject: [PATCH 038/100] Add note on truststore for S3-compatible repos (#82669) Today we note that the `repository-s3` plugin uses the JVM-wide truststore in the docs for the `protocol` client setting, but it turns out that this is easy to overlook since most installations will not need to change the `protocol`. This commit adds the same detail to the section on S3-compatible repositories where it is more likely to be found. --- docs/reference/snapshot-restore/repository-s3.asciidoc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 87542cc7d0ce4..cc9f86237fc34 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -207,8 +207,13 @@ the `repository-s3` type allows you to use these systems in place of AWS S3. To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the system's endpoint. This setting accepts IP addresses and hostnames and may include a port. For example, the endpoint may be `172.17.0.2` or -`172.17.0.2:9000`. You may also need to set `s3.client.CLIENT_NAME.protocol` to -`http` if the endpoint does not support HTTPS. +`172.17.0.2:9000`. + +By default {es} communicates with your storage system using HTTPS, and +validates the repository's certificate chain using the JVM-wide truststore. +Ensure that the JVM-wide truststore includes an entry for your repository. If +you wish to use unsecured HTTP communication instead of HTTPS, set +`s3.client.CLIENT_NAME.protocol` to `http`. https://minio.io[MinIO] is an example of a storage system that provides an S3-compatible API. The `repository-s3` type allows {es} to work with From c432ef1cff54d0601976e9c465e06a912a844a31 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 26 Jan 2022 22:32:54 +1100 Subject: [PATCH 039/100] [Test] Fix header value assertion for 401 error (#83037) The WWW-Authenticate header is multi-valued. In rare cases, the first value may not be the one beginning with "Basic". The PR makes the assertion agnostic to the order and also asserts for all possible header values. 
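For illustration only (the exact challenge values below are an assumption added for this write-up, not something this change asserts verbatim): a security-enabled node may answer an unauthenticated request with several challenges, such as

    HTTP/1.1 401 Unauthorized
    WWW-Authenticate: Basic realm="security" charset="UTF-8"
    WWW-Authenticate: Bearer realm="security"
    WWW-Authenticate: ApiKey

so a test that reads only the first WWW-Authenticate value is order-dependent and can occasionally see a non-Basic value first.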
Resolves: #83022 --- .../xpack/security/user/AnonymousUserIntegTests.java | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java index b958ed44e7296..2b74f1a4bcde7 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.security.user; +import org.apache.http.Header; import org.apache.http.util.EntityUtils; import org.elasticsearch.action.get.GetAction; import org.elasticsearch.action.get.GetRequest; @@ -26,13 +27,15 @@ import org.junit.BeforeClass; import java.io.IOException; +import java.util.Arrays; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class AnonymousUserIntegTests extends SecurityIntegTestCase { @@ -75,8 +78,11 @@ public void testAnonymousViaHttp() throws Exception { assertThat(EntityUtils.toString(response.getEntity()), containsString("security_exception")); } else { assertThat(statusCode, is(401)); - assertThat(response.getHeader("WWW-Authenticate"), notNullValue()); - assertThat(response.getHeader("WWW-Authenticate"), containsString("Basic")); + final List<String> wwwAuthenticateHeaders = Arrays.stream(response.getHeaders()) + .filter(header -> "WWW-Authenticate".equalsIgnoreCase(header.getName())) + .map(Header::getValue) + .toList(); + assertThat(wwwAuthenticateHeaders, hasItems(containsString("Basic"), containsString("ApiKey"))); assertThat(EntityUtils.toString(response.getEntity()), containsString("security_exception")); } } From 3157d1ca925687e495830a4a109bc67143506486 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Wed, 26 Jan 2022 13:33:39 +0000 Subject: [PATCH 040/100] Consolidate nested mapper information into NestedLookup (#75677) Nested mapping information is currently accessed by looking through a list of ObjectMappers and then reconstructing relationships by checking field names against possible parents. This is implemented in several different places throughout the codebase. Now that nested object mappers are separate from generic ObjectMapper, we can instead centralise this information into a single NestedLookup class built from the mappings.
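The shape of the consolidated lookup, sketched from the call sites in this patch (the authoritative definition is the new NestedLookup.java in the diff below; the names here are a reading aid inferred from usage, not the verbatim API):

    import java.util.Map;
    import org.apache.lucene.search.Query;
    import org.elasticsearch.index.mapper.NestedObjectMapper;

    // Sketch only: one object, built once from the mappings, that answers the
    // nested-mapping questions previously recomputed at each call site.
    interface NestedLookupSketch {
        // All nested object mappers in the mapping, keyed by their full path.
        Map<String, NestedObjectMapper> getNestedMappers();

        // For nested mappers that themselves contain nested children, the filter
        // matching their parent documents (used e.g. by the bitset cache warmer).
        Map<String, Query> getNestedParentFilters();

        // Closest nested ancestor of a path: "foo.bar.baz" -> "foo" when only
        // "foo" is mapped as nested; null when the path has no nested parent.
        String getNestedParent(String path);
    }

A mapping with no nested fields is represented by a shared EMPTY instance, which is what call sites such as `mappers().nestedLookup() != NestedLookup.EMPTY` compare against.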
--- .../AggConstructionContentionBenchmark.java | 4 +- .../percolator/PercolateQueryBuilder.java | 5 +- .../search/aggregations/bucket/NestedIT.java | 6 +- .../fieldcaps/FieldCapabilitiesFetcher.java | 31 +++-- .../index/cache/bitset/BitsetFilterCache.java | 7 +- .../index/mapper/DocumentMapper.java | 2 +- .../index/mapper/FieldAliasMapper.java | 4 +- .../index/mapper/FieldMapper.java | 4 +- .../index/mapper/MapperService.java | 2 +- .../index/mapper/MappingLookup.java | 67 ++-------- .../index/mapper/NestedLookup.java | 114 ++++++++++++++++++ .../index/mapper/NestedObjectMapper.java | 1 - .../index/query/NestedQueryBuilder.java | 25 ++-- .../index/query/SearchExecutionContext.java | 33 +---- .../index/search/NestedHelper.java | 49 +++----- .../search/DefaultSearchContext.java | 6 +- .../elasticsearch/search/NestedDocuments.java | 34 +++--- .../nested/NestedAggregationBuilder.java | 21 +--- .../ReverseNestedAggregationBuilder.java | 12 +- .../support/AggregationContext.java | 10 +- .../search/fetch/subphase/FieldFetcher.java | 8 +- .../search/sort/FieldSortBuilder.java | 21 ++-- .../search/sort/SortBuilder.java | 10 +- .../index/mapper/NestedLookupTests.java | 84 +++++++++++++ .../index/mapper/NestedObjectMapperTests.java | 20 +-- .../index/query/ExistsQueryBuilderTests.java | 19 +-- .../index/query/NestedQueryBuilderTests.java | 3 +- .../index/search/NestedHelperTests.java | 108 ++++++----------- .../fetch/subphase/FieldFetcherTests.java | 3 +- .../search/sort/AbstractSortTestCase.java | 10 +- .../sort/GeoDistanceSortBuilderTests.java | 5 +- .../search/sort/NestedSortBuilderTests.java | 4 +- .../index/mapper/MapperServiceTestCase.java | 20 ++- .../authz/permission/DocumentPermissions.java | 6 +- .../mapper/DenseVectorFieldMapper.java | 2 +- .../mapper/WildcardFieldMapperTests.java | 6 + 36 files changed, 414 insertions(+), 352 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/NestedLookup.java create mode 100644 server/src/test/java/org/elasticsearch/index/mapper/NestedLookupTests.java diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java index f8d2e148b16ce..920af2525c7a4 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java @@ -28,9 +28,9 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -277,7 +277,7 @@ public Optional<SortAndFormats> buildSort(List<SortBuilder<?>> sortBuilders) thr } @Override - public ObjectMapper getObjectMapper(String path) { + public NestedLookup nestedLookup() { throw new UnsupportedOperationException(); } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java 
b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 7e864b716ab92..d757e4c85fcca 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -517,9 +518,9 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { final IndexSearcher docSearcher; final boolean excludeNestedDocuments; if (docs.size() > 1 || docs.get(0).docs().size() > 1) { - assert docs.size() != 1 || context.hasNested(); + assert docs.size() != 1 || context.nestedLookup() != NestedLookup.EMPTY; docSearcher = createMultiDocumentSearcher(analyzer, docs); - excludeNestedDocuments = context.hasNested() + excludeNestedDocuments = context.nestedLookup() != NestedLookup.EMPTY && docs.stream().map(ParsedDocument::docs).mapToInt(List::size).anyMatch(size -> size > 1); } else { MemoryIndex memoryIndex = MemoryIndex.fromDocument(docs.get(0).rootDoc(), analyzer, true, false); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index 7632bac9739e3..ac818c6649262 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -9,7 +9,6 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; @@ -361,6 +360,10 @@ public void testEmptyAggregation() throws Exception { assertThat(nested.getDocCount(), is(0L)); } + // TODO previously we would detect if you tried to do a nested agg on a non-nested object field, + // but ignore things if you tried to do a nested agg on any other field. We should probably + // decide which behaviour we want and do the same in both cases. 
+ /* public void testNestedOnObjectField() throws Exception { try { client().prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(nested("object_field", "incorrect")).get(); @@ -369,6 +372,7 @@ public void testNestedOnObjectField() throws Exception { assertThat(e.toString(), containsString("[nested] nested path [incorrect] is not nested")); } } + */ // Test based on: https://github.com/elastic/elasticsearch/issues/9280 public void testParentFilterResolvedCorrectly() throws Exception { diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java index a0db3268d5ed3..7eac2f54c1137 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java @@ -11,7 +11,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.RuntimeField; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -107,22 +106,20 @@ FieldCapabilitiesIndexResponse fetch( // checks if the parent field contains sub-fields if (searchExecutionContext.getFieldType(parentField) == null) { // no field type, it must be an object field - ObjectMapper mapper = searchExecutionContext.getObjectMapper(parentField); - // Composite runtime fields do not have a mapped type for the root - check for null - if (mapper != null) { - String type = mapper.isNested() ? "nested" : "object"; - IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( - parentField, - type, - false, - false, - false, - false, - null, - Collections.emptyMap() - ); - responseMap.put(parentField, fieldCap); - } + String type = searchExecutionContext.nestedLookup().getNestedMappers().get(parentField) != null + ? 
"nested" + : "object"; + IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( + parentField, + type, + false, + false, + false, + false, + null, + Collections.emptyMap() + ); + responseMap.put(parentField, fieldCap); } dotIndex = parentField.lastIndexOf('.'); } diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index dc2112af309db..abbfe634e2ce3 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -38,7 +38,7 @@ import org.elasticsearch.index.IndexWarmer.TerminationHandle; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; -import org.elasticsearch.index.mapper.NestedObjectMapper; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardUtils; @@ -238,9 +238,10 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin final Set warmUp = new HashSet<>(); final MapperService mapperService = indexShard.mapperService(); MappingLookup lookup = mapperService.mappingLookup(); - if (lookup.hasNested()) { + NestedLookup nestedLookup = lookup.nestedLookup(); + if (nestedLookup != NestedLookup.EMPTY) { warmUp.add(Queries.newNonNestedFilter()); - lookup.getNestedParentMappers().stream().map(NestedObjectMapper::nestedTypeFilter).forEach(warmUp::add); + warmUp.addAll(nestedLookup.getNestedParentFilters().values()); } final CountDownLatch latch = new CountDownLatch(reader.leaves().size() * warmUp.size()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index f6dc49cff5d2d..be8f9c5834787 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -95,7 +95,7 @@ public void validate(IndexSettings settings, boolean checkLimits) { ); } } - if (settings.getIndexSortConfig().hasIndexSort() && mappers().hasNested()) { + if (settings.getIndexSortConfig().hasIndexSort() && mappers().nestedLookup() != NestedLookup.EMPTY) { throw new IllegalArgumentException("cannot have nested fields when index sort is activated"); } List routingPaths = settings.getIndexMetadata().getRoutingPaths(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java index 8a4c4ae16fd02..7ad30a93b921e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java @@ -94,8 +94,8 @@ public void validate(MappingLookup mappers) { "Invalid [path] value [" + path + "] for field alias [" + name() + "]: an alias cannot refer to another alias." 
); } - String aliasScope = mappers.getNestedParent(name); - String pathScope = mappers.getNestedParent(path); + String aliasScope = mappers.nestedLookup().getNestedParent(name); + String pathScope = mappers.nestedLookup().getNestedParent(path); if (Objects.equals(aliasScope, pathScope) == false) { StringBuilder message = new StringBuilder( diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 962e9f025d5cf..3ce33fb26c4b4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -340,7 +340,7 @@ public final void validate(MappingLookup mappers) { throw new IllegalArgumentException("[copy_to] may not be used to copy from a multi-field: [" + this.name() + "]"); } - final String sourceScope = mappers.getNestedParent(this.name()); + final String sourceScope = mappers.nestedLookup().getNestedParent(this.name()); for (String copyTo : this.copyTo().copyToFields()) { if (mappers.isMultiField(copyTo)) { throw new IllegalArgumentException("[copy_to] may not be used to copy to a multi-field: [" + copyTo + "]"); @@ -349,7 +349,7 @@ public final void validate(MappingLookup mappers) { throw new IllegalArgumentException("Cannot copy to field [" + copyTo + "] since it is mapped as an object"); } - final String targetScope = mappers.getNestedParent(copyTo); + final String targetScope = mappers.nestedLookup().getNestedParent(copyTo); checkNestedScopeCompatibility(sourceScope, targetScope); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index b0f42f20346f2..8d5ff6df45b00 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -176,7 +176,7 @@ public MapperService( } public boolean hasNested() { - return mappingLookup().hasNested(); + return mappingLookup().nestedLookup() != NestedLookup.EMPTY; } public IndexAnalyzers getIndexAnalyzers() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 13eff0d1e9fe9..5ce6b05556459 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -49,7 +49,7 @@ private CacheKey() {} /** Full field name to mapper */ private final Map fieldMappers; private final Map objectMappers; - private final boolean hasNested; + private final NestedLookup nestedLookup; private final FieldTypeLookup fieldTypeLookup; private final FieldTypeLookup indexTimeLookup; // for index-time scripts, a lookup that does not include runtime fields private final Map indexAnalyzersMap = new HashMap<>(); @@ -133,16 +133,16 @@ private MappingLookup( Map fieldMappers = new HashMap<>(); Map objects = new HashMap<>(); - boolean hasNested = false; + List nestedMappers = new ArrayList<>(); for (ObjectMapper mapper : objectMappers) { if (objects.put(mapper.fullPath(), mapper) != null) { throw new MapperParsingException("Object mapper [" + mapper.fullPath() + "] is defined more than once"); } if (mapper.isNested()) { - hasNested = true; + nestedMappers.add((NestedObjectMapper) mapper); } } - this.hasNested = hasNested; + this.nestedLookup = NestedLookup.build(nestedMappers); for 
(FieldMapper mapper : mappers) { if (objects.containsKey(mapper.name())) { @@ -327,14 +327,14 @@ private void checkNestedLimit(long limit) { } } - public boolean hasNested() { - return hasNested; - } - public Map objectMappers() { return objectMappers; } + public NestedLookup nestedLookup() { + return nestedLookup; + } + public boolean isMultiField(String field) { String sourceParent = parentObject(field); return sourceParent != null && fieldMappers.containsKey(sourceParent); @@ -344,23 +344,6 @@ public boolean isObjectField(String field) { return objectMappers.containsKey(field); } - /** - * Given a nested object path, returns the path to its nested parent - * - * In particular, if a nested field `foo` contains an object field - * `bar.baz`, then calling this method with `foo.bar.baz` will return - * the path `foo`, skipping over the object-but-not-nested `foo.bar` - */ - public String getNestedParent(String path) { - for (String parentPath = parentObject(path); parentPath != null; parentPath = parentObject(parentPath)) { - ObjectMapper objectMapper = objectMappers.get(parentPath); - if (objectMapper != null && objectMapper.isNested()) { - return parentPath; - } - } - return null; - } - private static String parentObject(String field) { int lastDot = field.lastIndexOf('.'); if (lastDot == -1) { @@ -455,40 +438,6 @@ public Mapping getMapping() { return mapping; } - /** - * Returns all nested object mappers - */ - public List getNestedMappers() { - List childMappers = new ArrayList<>(); - for (ObjectMapper mapper : objectMappers().values()) { - if (mapper.isNested() == false) { - continue; - } - childMappers.add((NestedObjectMapper) mapper); - } - return childMappers; - } - - /** - * Returns all nested object mappers which contain further nested object mappers - * - * Used by BitSetProducerWarmer - */ - public List getNestedParentMappers() { - List parents = new ArrayList<>(); - for (ObjectMapper mapper : objectMappers().values()) { - String nestedParentPath = getNestedParent(mapper.fullPath()); - if (nestedParentPath == null) { - continue; - } - ObjectMapper parent = objectMappers().get(nestedParentPath); - if (parent.isNested()) { - parents.add((NestedObjectMapper) parent); - } - } - return parents; - } - /** * Check if the provided {@link MappedFieldType} shadows a dimension * or metric field. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedLookup.java new file mode 100644 index 0000000000000..edc84752f48f9 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedLookup.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.search.Query; + +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Holds information about nested mappings + */ +public interface NestedLookup { + + /** + * @return a map of all nested object mappers in the current mapping + */ + Map getNestedMappers(); + + /** + * @return filters for nested objects that contain further nested mappers + */ + Map getNestedParentFilters(); + + /** + * Given a nested object path, returns the path to its nested parent + * + * In particular, if a nested field `foo` contains an object field + * `bar.baz`, then calling this method with `foo.bar.baz` will return + * the path `foo`, skipping over the object-but-not-nested `foo.bar` + * + * @param path the path to resolve + */ + String getNestedParent(String path); + + /** + * A NestedLookup for a mapping with no nested mappers + */ + NestedLookup EMPTY = new NestedLookup() { + @Override + public Map getNestedMappers() { + return Collections.emptyMap(); + } + + @Override + public Map getNestedParentFilters() { + return Collections.emptyMap(); + } + + @Override + public String getNestedParent(String path) { + return null; + } + }; + + /** + * Construct a NestedLookup from a list of NestedObjectMappers + * @param mappers the nested mappers to build a lookup over + */ + static NestedLookup build(List mappers) { + if (mappers == null || mappers.isEmpty()) { + return NestedLookup.EMPTY; + } + mappers = mappers.stream().sorted(Comparator.comparing(ObjectMapper::name)).collect(Collectors.toList()); + Map parentFilters = new HashMap<>(); + Map mappersByName = new HashMap<>(); + NestedObjectMapper previous = null; + for (NestedObjectMapper mapper : mappers) { + mappersByName.put(mapper.name(), mapper); + if (previous != null) { + if (mapper.name().startsWith(previous.name() + ".")) { + parentFilters.put(previous.name(), previous.nestedTypeFilter()); + } + } + previous = mapper; + } + List nestedPathNames = mappers.stream().map(NestedObjectMapper::name).collect(Collectors.toList()); + return new NestedLookup() { + + @Override + public Map getNestedMappers() { + return mappersByName; + } + + @Override + public Map getNestedParentFilters() { + return parentFilters; + } + + @Override + public String getNestedParent(String path) { + String parent = null; + for (String parentPath : nestedPathNames) { + if (path.startsWith(parentPath + ".")) { + // path names are ordered so this will give us the + // parent with the longest path + parent = parentPath; + } + } + return parent; + } + }; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 87370aa704f01..f925dd4ddf3e1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -174,7 +174,6 @@ public ObjectMapper merge(Mapper mergeWith, MapperService.MergeReason reason) { } NestedObjectMapper mergeWithObject = (NestedObjectMapper) mergeWith; NestedObjectMapper toMerge = (NestedObjectMapper) clone(); - if (reason == MapperService.MergeReason.INDEX_TEMPLATE) { if (mergeWithObject.includeInParent.explicit()) { toMerge.includeInParent = mergeWithObject.includeInParent; diff --git 
a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index e8b7a5ec90882..0bb668fcc9f56 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.index.mapper.NestedObjectMapper; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; import org.elasticsearch.index.search.NestedHelper; import org.elasticsearch.search.SearchHit; @@ -263,17 +262,14 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { ); } - ObjectMapper nestedObjectMapper = context.getObjectMapper(path); - if (nestedObjectMapper == null) { + NestedObjectMapper mapper = context.nestedLookup().getNestedMappers().get(path); + if (mapper == null) { if (ignoreUnmapped) { return new MatchNoDocsQuery(); } else { throw new IllegalStateException("[" + NAME + "] failed to find nested object under path [" + path + "]"); } } - if (nestedObjectMapper.isNested() == false) { - throw new IllegalStateException("[" + NAME + "] nested object under path [" + path + "] is not of nested type"); - } final BitSetProducer parentFilter; Query innerQuery; NestedObjectMapper objectMapper = context.nestedScope().getObjectMapper(); @@ -284,7 +280,7 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { } try { - context.nestedScope().nextLevel((NestedObjectMapper) nestedObjectMapper); + context.nestedScope().nextLevel(mapper); innerQuery = this.query.toQuery(context); } finally { context.nestedScope().previousLevel(); @@ -292,9 +288,9 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { // ToParentBlockJoinQuery requires that the inner query only matches documents // in its child space - NestedHelper nestedHelper = new NestedHelper(context::getObjectMapper, context::isFieldMapped); + NestedHelper nestedHelper = new NestedHelper(context.nestedLookup(), context::isFieldMapped); if (nestedHelper.mightMatchNonNestedDocs(innerQuery, path)) { - innerQuery = Queries.filtered(innerQuery, ((NestedObjectMapper) nestedObjectMapper).nestedTypeFilter()); + innerQuery = Queries.filtered(innerQuery, mapper.nestedTypeFilter()); } return new ESToParentBlockJoinQuery(innerQuery, parentFilter, scoreMode, objectMapper == null ? null : objectMapper.fullPath()); @@ -342,22 +338,21 @@ static class NestedInnerHitContextBuilder extends InnerHitContextBuilder { @Override protected void doBuild(SearchContext parentSearchContext, InnerHitsContext innerHitsContext) throws IOException { SearchExecutionContext searchExecutionContext = parentSearchContext.getSearchExecutionContext(); - ObjectMapper objectMapper = searchExecutionContext.getObjectMapper(path); - if (objectMapper == null || objectMapper.isNested() == false) { + NestedObjectMapper nestedMapper = searchExecutionContext.nestedLookup().getNestedMappers().get(path); + if (nestedMapper == null) { if (innerHitBuilder.isIgnoreUnmapped() == false) { throw new IllegalStateException("[" + query.getName() + "] no mapping found for type [" + path + "]"); } else { return; } } - NestedObjectMapper nestedObjectMapper = (NestedObjectMapper) objectMapper; - String name = innerHitBuilder.getName() != null ? 
innerHitBuilder.getName() : nestedObjectMapper.fullPath(); - NestedObjectMapper parentObjectMapper = searchExecutionContext.nestedScope().nextLevel(nestedObjectMapper); + String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : nestedMapper.fullPath(); + NestedObjectMapper parentObjectMapper = searchExecutionContext.nestedScope().nextLevel(nestedMapper); NestedInnerHitSubContext nestedInnerHits = new NestedInnerHitSubContext( name, parentSearchContext, parentObjectMapper, - nestedObjectMapper + nestedMapper ); setupInnerHitsContext(searchExecutionContext, nestedInnerHits); searchExecutionContext.nestedScope().previousLevel(); diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index 1b96a27cf7ba6..457f7160306eb 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSortConfig; @@ -44,8 +43,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.MappingParserContext; -import org.elasticsearch.index.mapper.NestedObjectMapper; -import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.RuntimeField; import org.elasticsearch.index.mapper.SourceToParse; @@ -305,18 +303,14 @@ public ParsedDocument parseDocument(SourceToParse source) throws MapperParsingEx return mapperService.documentParser().parseDocument(source, mappingLookup); } - public boolean hasNested() { - return mappingLookup.hasNested(); + public NestedLookup nestedLookup() { + return mappingLookup.nestedLookup(); } public boolean hasMappings() { return mappingLookup.hasMappings(); } - public List nestedMappings() { - return mappingLookup.getNestedMappers(); - } - /** * Returns the names of all mapped fields that match a given pattern * @@ -376,16 +370,6 @@ private MappedFieldType fieldType(String name) { return fieldType == null ? mappingLookup.getFieldType(name) : fieldType; } - /** - * - * @param name name of the object - * @return can be null e.g. 
if field is root of a composite runtime field - */ - @Nullable - public ObjectMapper getObjectMapper(String name) { - return mappingLookup.objectMappers().get(name); - } - public boolean isMetadataField(String field) { return mapperService.isMetadataField(field); } @@ -706,17 +690,6 @@ public MappingLookup.CacheKey mappingCacheKey() { return mappingLookup.cacheKey(); } - /** - * Given a nested object path, returns the path to its nested parent - * - * In particular, if a nested field `foo` contains an object field - * `bar.baz`, then calling this method with `foo.bar.baz` will return - * the path `foo`, skipping over the object-but-not-nested `foo.bar` - */ - public String getNestedParent(String nestedPath) { - return mappingLookup.getNestedParent(nestedPath); - } - public NestedDocuments getNestedDocuments() { return new NestedDocuments(mappingLookup, bitsetFilterCache::getBitSetProducer); } diff --git a/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java b/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java index 5e0989edd9af9..c70ef88d1a7b9 100644 --- a/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java @@ -21,21 +21,20 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.NestedObjectMapper; -import org.elasticsearch.index.mapper.ObjectMapper; -import java.util.function.Function; import java.util.function.Predicate; /** Utility class to filter parent and children clauses when building nested * queries. */ public final class NestedHelper { - private final Function objectMapperLookup; + private final NestedLookup nestedLookup; private final Predicate isMappedFieldPredicate; - public NestedHelper(Function objectMapperLookup, Predicate isMappedFieldPredicate) { - this.objectMapperLookup = objectMapperLookup; + public NestedHelper(NestedLookup nestedLookup, Predicate isMappedFieldPredicate) { + this.nestedLookup = nestedLookup; this.isMappedFieldPredicate = isMappedFieldPredicate; } @@ -102,13 +101,7 @@ boolean mightMatchNestedDocs(String field) { // field does not exist return false; } - for (String parent = parentObject(field); parent != null; parent = parentObject(parent)) { - ObjectMapper mapper = objectMapperLookup.apply(parent); - if (mapper != null && mapper.isNested()) { - return true; - } - } - return false; + return nestedLookup.getNestedParent(field) != null; } /** Returns true if the given query might match parent documents or documents @@ -171,30 +164,18 @@ boolean mightMatchNonNestedDocs(String field, String nestedPath) { if (isMappedFieldPredicate.test(field) == false) { return false; } - for (String parent = parentObject(field); parent != null; parent = parentObject(parent)) { - ObjectMapper mapper = objectMapperLookup.apply(parent); - if (mapper != null && mapper.isNested()) { - NestedObjectMapper nestedMapper = (NestedObjectMapper) mapper; - if (mapper.fullPath().equals(nestedPath)) { - // If the mapper does not include in its parent or in the root object then - // the query might only match nested documents with the given path - return nestedMapper.isIncludeInParent() || nestedMapper.isIncludeInRoot(); - } else { - // the first parent nested mapper does not have the expected path - // It might be misconfiguration or a sub nested mapper - return true; - } - } + String 
nestedParent = nestedLookup.getNestedParent(field); + if (nestedParent == null || nestedParent.startsWith(nestedPath) == false) { + // the field is not a sub field of the nested path + return true; } - return true; // the field is not a sub field of the nested path - } - - public static String parentObject(String field) { - int lastDot = field.lastIndexOf('.'); - if (lastDot == -1) { - return null; + NestedObjectMapper nestedMapper = nestedLookup.getNestedMappers().get(nestedParent); + // If the mapper does not include in its parent or in the root object then + // the query might only match nested documents with the given path + if (nestedParent.equals(nestedPath)) { + return nestedMapper.isIncludeInParent() || nestedMapper.isIncludeInRoot(); } - return field.substring(0, lastDot); + return true; } } diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index bb525aaf61273..5a2ad3635c1a1 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -24,6 +24,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryBuilder; @@ -274,8 +275,9 @@ public void preProcess() { @Override public Query buildFilteredQuery(Query query) { List filters = new ArrayList<>(); - NestedHelper nestedHelper = new NestedHelper(searchExecutionContext::getObjectMapper, searchExecutionContext::isFieldMapped); - if (searchExecutionContext.hasNested() + NestedLookup nestedLookup = searchExecutionContext.nestedLookup(); + NestedHelper nestedHelper = new NestedHelper(nestedLookup, searchExecutionContext::isFieldMapped); + if (nestedLookup != NestedLookup.EMPTY && nestedHelper.mightMatchNestedDocs(query) && (aliasFilter == null || nestedHelper.mightMatchNestedDocs(aliasFilter))) { filters.add(Queries.newNonNestedFilter()); diff --git a/server/src/main/java/org/elasticsearch/search/NestedDocuments.java b/server/src/main/java/org/elasticsearch/search/NestedDocuments.java index 3bf879a0755bc..1be9c629d9be4 100644 --- a/server/src/main/java/org/elasticsearch/search/NestedDocuments.java +++ b/server/src/main/java/org/elasticsearch/search/NestedDocuments.java @@ -20,6 +20,7 @@ import org.apache.lucene.util.BitSet; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.NestedObjectMapper; import java.io.IOException; @@ -34,9 +35,8 @@ public class NestedDocuments { private final Map parentObjectFilters = new HashMap<>(); private final Map childObjectFilters = new HashMap<>(); - private final Map childObjectMappers = new HashMap<>(); private final BitSetProducer parentDocumentFilter; - private final MappingLookup mappingLookup; + private final NestedLookup nestedLookup; /** * Create a new NestedDocuments object for an index @@ -44,17 +44,14 @@ public class NestedDocuments { * @param filterProducer a function to build BitSetProducers from filter queries */ public NestedDocuments(MappingLookup mappingLookup, Function filterProducer) { - this.mappingLookup = mappingLookup; - if 
(mappingLookup.hasNested() == false) { + this.nestedLookup = mappingLookup.nestedLookup(); + if (this.nestedLookup == NestedLookup.EMPTY) { this.parentDocumentFilter = null; } else { this.parentDocumentFilter = filterProducer.apply(Queries.newNonNestedFilter()); - for (NestedObjectMapper mapper : mappingLookup.getNestedParentMappers()) { - parentObjectFilters.put(mapper.name(), filterProducer.apply(mapper.nestedTypeFilter())); - } - for (NestedObjectMapper mapper : mappingLookup.getNestedMappers()) { - childObjectFilters.put(mapper.name(), null); - childObjectMappers.put(mapper.name(), mapper); + nestedLookup.getNestedParentFilters().forEach((k, v) -> parentObjectFilters.put(k, filterProducer.apply(v))); + for (String nestedPath : nestedLookup.getNestedMappers().keySet()) { + childObjectFilters.put(nestedPath, null); } } } @@ -70,12 +67,12 @@ public LeafNestedDocuments getLeafNestedDocuments(LeafReaderContext ctx) throws } private Weight getNestedChildWeight(LeafReaderContext ctx, String path) throws IOException { - if (childObjectFilters.containsKey(path) == false || childObjectMappers.containsKey(path) == false) { + if (childObjectFilters.containsKey(path) == false) { throw new IllegalStateException("Cannot find object mapper for path " + path); } if (childObjectFilters.get(path) == null) { IndexSearcher searcher = new IndexSearcher(ReaderUtil.getTopLevelContext(ctx)); - NestedObjectMapper childMapper = childObjectMappers.get(path); + NestedObjectMapper childMapper = nestedLookup.getNestedMappers().get(path); childObjectFilters.put( path, searcher.createWeight(searcher.rewrite(childMapper.nestedTypeFilter()), ScoreMode.COMPLETE_NO_SCORES, 1) @@ -168,7 +165,7 @@ private SearchHit.NestedIdentity loadNestedIdentity() throws IOException { int parentNameLength; String path = findObjectPath(doc); while (path != null) { - String parent = mappingLookup.getNestedParent(path); + String parent = nestedLookup.getNestedParent(path); // We have to pull a new scorer for each document here, because we advance from // the last parent which will be behind the doc Scorer childScorer = getNestedChildWeight(ctx, path).scorer(ctx); @@ -181,7 +178,16 @@ private SearchHit.NestedIdentity loadNestedIdentity() throws IOException { parentNameLength = 0; } else { if (objectFilters.containsKey(parent) == false) { - throw new IllegalStateException("Cannot find parent mapper for path " + path + " in doc " + doc); + throw new IllegalStateException( + "Cannot find parent mapper " + + parent + + " for path " + + path + + " in doc " + + doc + + " - known parents are " + + objectFilters.keySet() + ); } parentBitSet = objectFilters.get(parent); parentNameLength = parent.length() + 1; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java index 363fec62c135f..5d2e715d6a69b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java @@ -12,10 +12,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.NestedObjectMapper; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import 
org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; @@ -84,26 +82,15 @@ public BucketCardinality bucketCardinality() { @Override protected AggregatorFactory doBuild(AggregationContext context, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - ObjectMapper childObjectMapper = context.getObjectMapper(path); - if (childObjectMapper == null) { + NestedObjectMapper nestedMapper = context.nestedLookup().getNestedMappers().get(path); + if (nestedMapper == null) { // in case the path has been unmapped: return new NestedAggregatorFactory(name, null, null, context, parent, subFactoriesBuilder, metadata); } - if (childObjectMapper.isNested() == false) { - throw new AggregationExecutionException("[nested] nested path [" + path + "] is not nested"); - } try { - NestedObjectMapper parentObjectMapper = context.nestedScope().nextLevel((NestedObjectMapper) childObjectMapper); - return new NestedAggregatorFactory( - name, - parentObjectMapper, - (NestedObjectMapper) childObjectMapper, - context, - parent, - subFactoriesBuilder, - metadata - ); + NestedObjectMapper parentObjectMapper = context.nestedScope().nextLevel(nestedMapper); + return new NestedAggregatorFactory(name, parentObjectMapper, nestedMapper, context, parent, subFactoriesBuilder, metadata); } finally { context.nestedScope().previousLevel(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java index f49f0d4410211..6501e003edef4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java @@ -12,11 +12,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.NestedObjectMapper; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; @@ -91,19 +89,15 @@ protected AggregatorFactory doBuild(AggregationContext context, AggregatorFactor throw new IllegalArgumentException("Reverse nested aggregation [" + name + "] can only be used inside a [nested] aggregation"); } - ObjectMapper parentObjectMapper = null; + NestedObjectMapper nestedMapper = null; if (path != null) { - parentObjectMapper = context.getObjectMapper(path); - if (parentObjectMapper == null) { + nestedMapper = context.nestedLookup().getNestedMappers().get(path); + if (nestedMapper == null) { return new ReverseNestedAggregatorFactory(name, true, null, context, parent, subFactoriesBuilder, metadata); } - if (parentObjectMapper.isNested() == false) { - 
throw new AggregationExecutionException("[reverse_nested] nested path [" + path + "] is not nested"); - } } NestedScope nestedScope = context.nestedScope(); - NestedObjectMapper nestedMapper = (NestedObjectMapper) parentObjectMapper; try { nestedScope.nextLevel(nestedMapper); return new ReverseNestedAggregatorFactory(name, false, nestedMapper, context, parent, subFactoriesBuilder, metadata); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java index b2c68659bbaae..7219a502dfb77 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java @@ -25,7 +25,7 @@ import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.query.SearchExecutionContext; @@ -206,9 +206,9 @@ public final AggregationUsageService getUsageService() { public abstract Optional buildSort(List> sortBuilders) throws IOException; /** - * Find an {@link ObjectMapper}. + * Get the {@link NestedLookup} of this index */ - public abstract ObjectMapper getObjectMapper(String path); + public abstract NestedLookup nestedLookup(); /** * Access the nested scope. Stay away from this unless you are dealing with nested. @@ -474,8 +474,8 @@ public Optional buildSort(List> sortBuilders) thr } @Override - public ObjectMapper getObjectMapper(String path) { - return context.getObjectMapper(path); + public NestedLookup nestedLookup() { + return context.nestedLookup(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FieldFetcher.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FieldFetcher.java index ffc02903591e9..e68ae0f4b9cff 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FieldFetcher.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FieldFetcher.java @@ -16,7 +16,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NestedValueFetcher; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.lookup.SourceLookup; @@ -24,7 +23,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; @@ -45,9 +43,7 @@ public class FieldFetcher { private static final int AUTOMATON_MAX_DETERMINIZED_STATES = 100000; public static FieldFetcher create(SearchExecutionContext context, Collection fieldAndFormats) { - Set nestedMappingPaths = context.hasNested() - ? 
context.nestedMappings().stream().map(ObjectMapper::name).collect(Collectors.toSet()) - : Collections.emptySet(); + Set nestedMappingPaths = context.nestedLookup().getNestedMappers().keySet(); return create(context, fieldAndFormats, nestedMappingPaths, ""); } @@ -268,7 +264,7 @@ private void collectUnmappedList(Map documentFields, Iter private static Set getParentPaths(Set nestedPathsInScope, SearchExecutionContext context) { Set parentPaths = new HashSet<>(); for (String candidate : nestedPathsInScope) { - String nestedParent = context.getNestedParent(candidate); + String nestedParent = context.nestedLookup().getNestedParent(candidate); // if the candidate has no nested parent itself, its a minimal parent path // if the candidate has a parent which is out of scope this means it minimal itself if (nestedParent == null || nestedPathsInScope.contains(nestedParent) == false) { diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index f316cb4f24b41..2efae46d05627 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -30,9 +30,9 @@ import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.NestedObjectMapper; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryShardException; @@ -55,7 +55,6 @@ import static org.elasticsearch.index.mapper.DateFieldMapper.Resolution.MILLISECONDS; import static org.elasticsearch.index.mapper.DateFieldMapper.Resolution.NANOSECONDS; -import static org.elasticsearch.index.search.NestedHelper.parentObject; import static org.elasticsearch.search.sort.NestedSortBuilder.NESTED_FIELD; /** @@ -660,17 +659,13 @@ static void validateMissingNestedPath(SearchExecutionContext context, String fie // already in nested context return; } - for (String parent = parentObject(field); parent != null; parent = parentObject(parent)) { - ObjectMapper parentMapper = context.getObjectMapper(parent); - if (parentMapper != null && parentMapper.isNested()) { - NestedObjectMapper parentNested = (NestedObjectMapper) parentMapper; - if (parentNested.isIncludeInRoot() == false) { - throw new QueryShardException( - context, - "it is mandatory to set the [nested] context on the nested sort field: [" + field + "]." - ); - } - } + NestedLookup nestedLookup = context.nestedLookup(); + String nestedParent = nestedLookup.getNestedParent(field); + if (nestedParent != null && nestedLookup.getNestedMappers().get(nestedParent).isIncludeInRoot() == false) { + throw new QueryShardException( + context, + "it is mandatory to set the [nested] context on the nested sort field: [" + field + "]." 
+ ); } } diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index cca7cc1f11aaf..7b45f19943274 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -20,7 +20,6 @@ import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.mapper.NestedObjectMapper; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.Rewriteable; @@ -210,15 +209,10 @@ private static Query resolveNestedQuery(SearchExecutionContext context, NestedSo NestedSortBuilder nestedNestedSort = nestedSort.getNestedSort(); // verify our nested path - ObjectMapper objectMapper = context.getObjectMapper(nestedPath); - - if (objectMapper == null) { + NestedObjectMapper nestedObjectMapper = context.nestedLookup().getNestedMappers().get(nestedPath); + if (nestedObjectMapper == null) { throw new QueryShardException(context, "[nested] failed to find nested object under path [" + nestedPath + "]"); } - if (objectMapper.isNested() == false) { - throw new QueryShardException(context, "[nested] nested object under path [" + nestedPath + "] is not of nested type"); - } - NestedObjectMapper nestedObjectMapper = (NestedObjectMapper) objectMapper; NestedObjectMapper parentMapper = context.nestedScope().getObjectMapper(); // get our child query, potentially applying a users filter diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedLookupTests.java new file mode 100644 index 0000000000000..1c52a6d94fb48 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedLookupTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.Version; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.hasSize; + +public class NestedLookupTests extends MapperServiceTestCase { + + public void testMultiLevelParents() throws IOException { + + String mapping = "{\n" + + " \"_doc\": {\n" + + " \"properties\" : {\n" + + " \"SWufZ\" : {\n" + + " \"type\" : \"nested\",\n" + + " \"properties\" : {\n" + + " \"ZCPoX\" : {\n" + + " \"type\" : \"keyword\"\n" + + " },\n" + + " \"NnUDX\" : {\n" + + " \"properties\" : {\n" + + " \"dljyS\" : {\n" + + " \"type\" : \"nested\",\n" + + " \"properties\" : {\n" + + " \"JYmZZ\" : {\n" + + " \"type\" : \"keyword\"\n" + + " },\n" + + " \"EvbGO\" : {\n" + + " \"type\" : \"nested\",\n" + + " \"properties\" : {\n" + + " \"LAgoT\" : {\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + + MapperService mapperService = createMapperService(mapping); + + NestedLookup lookup = mapperService.mappingLookup().nestedLookup(); + assertEquals("SWufZ.NnUDX.dljyS", lookup.getNestedParent("SWufZ.NnUDX.dljyS.EvbGO")); + assertThat(lookup.getNestedParentFilters().keySet(), hasSize(2)); + + } + + private static NestedObjectMapper buildMapper(String name) { + return new NestedObjectMapper.Builder(name, Version.CURRENT).build(MapperBuilderContext.ROOT); + } + + public void testAllParentFilters() { + List mappers = List.of( + buildMapper("a.b"), + buildMapper("a.d"), + buildMapper("a.b.c.d.e"), + buildMapper("a.b.d"), + buildMapper("a"), + buildMapper("a.b.c.d") + ); + + NestedLookup lookup = NestedLookup.build(mappers); + assertThat(lookup.getNestedParentFilters().keySet(), containsInAnyOrder("a", "a.b", "a.b.c.d")); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index d9677ac35a57f..5355ecb676323 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -59,7 +59,7 @@ public void testSingleNested() throws Exception { DocumentMapper docMapper = createDocumentMapper(mapping(b -> b.startObject("nested1").field("type", "nested").endObject())); - assertThat(docMapper.mappers().hasNested(), equalTo(true)); + assertNotEquals(NestedLookup.EMPTY, docMapper.mappers().nestedLookup()); ObjectMapper mapper = docMapper.mappers().objectMappers().get("nested1"); assertThat(mapper, instanceOf(NestedObjectMapper.class)); NestedObjectMapper nested1Mapper = (NestedObjectMapper) mapper; @@ -136,7 +136,7 @@ public void testMultiNested() throws Exception { b.endObject(); })); - assertThat(docMapper.mappers().hasNested(), equalTo(true)); + assertNotEquals(NestedLookup.EMPTY, docMapper.mappers().nestedLookup()); ObjectMapper mapper1 = docMapper.mappers().objectMappers().get("nested1"); assertThat(mapper1, instanceOf(NestedObjectMapper.class)); NestedObjectMapper nested1Mapper = (NestedObjectMapper) mapper1; @@ -226,7 +226,7 @@ public void testMultiObjectAndNested1() throws Exception { b.endObject(); })); - assertThat(docMapper.mappers().hasNested(), equalTo(true)); + assertNotEquals(NestedLookup.EMPTY, docMapper.mappers().nestedLookup()); ObjectMapper mapper1 = 
docMapper.mappers().objectMappers().get("nested1"); assertThat(mapper1, instanceOf(NestedObjectMapper.class)); NestedObjectMapper nested1Mapper = (NestedObjectMapper) mapper1; @@ -317,7 +317,7 @@ public void testMultiObjectAndNested2() throws Exception { b.endObject(); })); - assertThat(docMapper.mappers().hasNested(), equalTo(true)); + assertNotEquals(NestedLookup.EMPTY, docMapper.mappers().nestedLookup()); ObjectMapper mapper1 = docMapper.mappers().objectMappers().get("nested1"); assertThat(mapper1, instanceOf(NestedObjectMapper.class)); NestedObjectMapper nested1Mapper = (NestedObjectMapper) mapper1; @@ -407,11 +407,11 @@ public void testMultiRootAndNested1() throws Exception { b.endObject(); })); - assertEquals("nested1", docMapper.mappers().getNestedParent("nested1.nested2")); - assertNull(docMapper.mappers().getNestedParent("nonexistent")); - assertNull(docMapper.mappers().getNestedParent("nested1")); + assertEquals("nested1", docMapper.mappers().nestedLookup().getNestedParent("nested1.nested2")); + assertNull(docMapper.mappers().nestedLookup().getNestedParent("nonexistent")); + assertNull(docMapper.mappers().nestedLookup().getNestedParent("nested1")); - assertThat(docMapper.mappers().hasNested(), equalTo(true)); + assertNotEquals(NestedLookup.EMPTY, docMapper.mappers().nestedLookup()); ObjectMapper mapper1 = docMapper.mappers().objectMappers().get("nested1"); assertThat(mapper1, instanceOf(NestedObjectMapper.class)); NestedObjectMapper nested1Mapper = (NestedObjectMapper) mapper1; @@ -745,7 +745,7 @@ public void testNestedArrayStrict() throws Exception { b.endObject(); })); - assertThat(docMapper.mappers().hasNested(), equalTo(true)); + assertNotEquals(NestedLookup.EMPTY, docMapper.mappers().nestedLookup()); ObjectMapper nested1Mapper = docMapper.mappers().objectMappers().get("nested1"); assertThat(nested1Mapper, instanceOf(NestedObjectMapper.class)); assertThat(nested1Mapper.dynamic(), equalTo(Dynamic.STRICT)); @@ -974,7 +974,7 @@ public void testReorderParent() throws IOException { mapping(b -> b.startObject("nested1").field("type", "nested").endObject()) ); - assertThat(docMapper.mappers().hasNested(), equalTo(true)); + assertNotEquals(NestedLookup.EMPTY, docMapper.mappers().nestedLookup()); ObjectMapper mapper = docMapper.mappers().objectMappers().get("nested1"); assertThat(mapper, instanceOf(NestedObjectMapper.class)); diff --git a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java index 191817d126ab4..6df0453d51947 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java @@ -20,9 +20,8 @@ import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; -import java.util.ArrayList; import java.util.Collection; -import java.util.List; +import java.util.Collections; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -49,8 +48,11 @@ protected ExistsQueryBuilder doCreateTestQueryBuilder() { protected void doAssertLuceneQuery(ExistsQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException { String fieldPattern = queryBuilder.fieldName(); Collection fields = context.getMatchingFieldNames(fieldPattern); - if (fields.size() == 0 && Regex.isSimpleMatchPattern(fieldPattern) == false && context.getObjectMapper(fieldPattern) != null) { - fields = 
context.getMatchingFieldNames(fieldPattern + ".*"); + if (fields.size() == 0 && Regex.isSimpleMatchPattern(fieldPattern) == false) { + if (context.getMatchingFieldNames(fieldPattern + ".*").isEmpty() == false) { + // we're an object field + fields = Collections.singleton(fieldPattern); + } } if (fields.size() == 0) { assertThat(fieldPattern, query, instanceOf(MatchNoDocsQuery.class)); @@ -58,14 +60,13 @@ protected void doAssertLuceneQuery(ExistsQueryBuilder queryBuilder, Query query, assertThat(query, instanceOf(ConstantScoreQuery.class)); ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) query; String field = expectedFieldName(fields.iterator().next()); - if (context.getObjectMapper(field) != null) { + if (context.getFieldType(field) == null) { + // not a leaf field, so we're doing an object exists query assertThat(constantScoreQuery.getQuery(), instanceOf(BooleanQuery.class)); BooleanQuery booleanQuery = (BooleanQuery) constantScoreQuery.getQuery(); - List childFields = new ArrayList<>(); - context.getObjectMapper(field).forEach(mapper -> childFields.add(mapper.name())); + Collection childFields = context.getMatchingFieldNames(field + ".*"); assertThat(booleanQuery.clauses().size(), equalTo(childFields.size())); - for (int i = 0; i < childFields.size(); i++) { - BooleanClause booleanClause = booleanQuery.clauses().get(i); + for (BooleanClause booleanClause : booleanQuery) { assertThat(booleanClause.getOccur(), equalTo(BooleanClause.Occur.SHOULD)); } } else if (context.getFieldType(field).hasDocValues()) { diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index b9712286560c0..a9f121e9dc349 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; import org.elasticsearch.search.fetch.subphase.InnerHitsContext; @@ -330,9 +331,9 @@ public void testInlineLeafInnerHitsNestedQueryViaFunctionScoreQuery() { public void testBuildIgnoreUnmappedNestQuery() throws Exception { SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class); - when(searchExecutionContext.getObjectMapper("path")).thenReturn(null); IndexSettings settings = new IndexSettings(newIndexMeta("index", Settings.EMPTY), Settings.EMPTY); when(searchExecutionContext.getIndexSettings()).thenReturn(settings); + when(searchExecutionContext.nestedLookup()).thenReturn(NestedLookup.EMPTY); SearchContext searchContext = mock(SearchContext.class); when(searchContext.getSearchExecutionContext()).thenReturn(searchExecutionContext); InnerHitBuilder leafInnerHits = randomNestedInnerHits(); diff --git a/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java b/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java index acab3fe2de42d..793e9eadf9f02 100644 --- a/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java @@ -8,94 +8,69 @@ package org.elasticsearch.index.search; 
-import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MapperServiceTestCase; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.NestedQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; import java.util.Collections; -import static java.util.Collections.emptyMap; +import static org.mockito.Mockito.mock; -public class NestedHelperTests extends ESSingleNodeTestCase { +public class NestedHelperTests extends MapperServiceTestCase { - IndexService indexService; MapperService mapperService; @Override public void setUp() throws Exception { super.setUp(); - XContentBuilder mapping = XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("foo") - .field("type", "keyword") - .endObject() - .startObject("foo2") - .field("type", "long") - .endObject() - .startObject("nested1") - .field("type", "nested") - .startObject("properties") - .startObject("foo") - .field("type", "keyword") - .endObject() - .startObject("foo2") - .field("type", "long") - .endObject() - .endObject() - .endObject() - .startObject("nested2") - .field("type", "nested") - .field("include_in_parent", true) - .startObject("properties") - .startObject("foo") - .field("type", "keyword") - .endObject() - .startObject("foo2") - .field("type", "long") - .endObject() - .endObject() - .endObject() - .startObject("nested3") - .field("type", "nested") - .field("include_in_root", true) - .startObject("properties") - .startObject("foo") - .field("type", "keyword") - .endObject() - .startObject("foo2") - .field("type", "long") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject(); - indexService = createIndex("index", Settings.EMPTY, mapping); - mapperService = indexService.mapperService(); + String mapping = """ + { "_doc" : { + "properties" : { + "foo" : { "type" : "keyword" }, + "foo2" : { "type" : "long" }, + "nested1" : { + "type" : "nested", + "properties" : { + "foo" : { "type" : "keyword" }, + "foo2" : { "type" : "long" } + } + }, + "nested2" : { + "type" : "nested", + "include_in_parent" : true, + "properties": { + "foo" : { "type" : "keyword" }, + "foo2" : { "type" : "long" } + } + }, + "nested3" : { + "type" : "nested", + "include_in_root" : true, + "properties": { + "foo" : { "type" : "keyword" }, + "foo2" : { "type" : "long" } + } + } + } + } } + """; + mapperService = createMapperService(mapping); } private static NestedHelper buildNestedHelper(MapperService mapperService) { - return new NestedHelper(mapperService.mappingLookup().objectMappers()::get, field -> mapperService.fieldType(field) != null); + return new 
NestedHelper(mapperService.mappingLookup().nestedLookup(), field -> mapperService.fieldType(field) != null); } public void testMatchAll() { @@ -175,7 +150,7 @@ public void testTermQuery() { } public void testRangeQuery() { - SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); + SearchExecutionContext context = mock(SearchExecutionContext.class); Query rangeQuery = mapperService.fieldType("foo2").rangeQuery(2, 5, true, true, null, null, null, context); assertFalse(buildNestedHelper(mapperService).mightMatchNestedDocs(rangeQuery)); assertTrue(buildNestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1")); @@ -302,14 +277,7 @@ public void testConjunction() { } public void testNested() throws IOException { - SearchExecutionContext context = indexService.newSearchExecutionContext( - 0, - 0, - new IndexSearcher(new MultiReader()), - () -> 0, - null, - emptyMap() - ); + SearchExecutionContext context = createSearchExecutionContext(mapperService); NestedQueryBuilder queryBuilder = new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.Avg); ESToParentBlockJoinQuery query = (ESToParentBlockJoinQuery) queryBuilder.toQuery(context); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java index a34d0143986b0..490203e856268 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java @@ -46,6 +46,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.hasSize; public class FieldFetcherTests extends MapperServiceTestCase { @@ -718,7 +719,7 @@ public void testNestedFields() throws IOException { .endObject(); Map fields = fetchFields(mapperService, source, fieldAndFormatList("*", null, false)); - assertEquals(2, fields.size()); + assertThat(fields.values(), hasSize(2)); assertThat(fields.keySet(), containsInAnyOrder("f1", "obj")); assertEquals("value1", fields.get("f1").getValue()); List obj = fields.get("obj").getValues(); diff --git a/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index e52f58826bfac..9c16d5fc275b3 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -23,9 +23,9 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.NestedObjectMapper; import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -53,6 +53,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.function.Function; import java.util.function.Supplier; @@ -195,6 +196,9 @@ protected final SearchExecutionContext 
createMockSearchExecutionContext(IndexSea IndexFieldData.Builder builder = fieldType.fielddataBuilder(fieldIndexName, searchLookup); return builder.build(new IndexFieldDataCache.None(), null); }; + NestedLookup nestedLookup = NestedLookup.build( + List.of(new NestedObjectMapper.Builder("path", Version.CURRENT).build(MapperBuilderContext.ROOT)) + ); return new SearchExecutionContext( 0, 0, @@ -223,8 +227,8 @@ public MappedFieldType getFieldType(String name) { } @Override - public ObjectMapper getObjectMapper(String name) { - return new NestedObjectMapper.Builder(name, Version.CURRENT).build(MapperBuilderContext.ROOT); + public NestedLookup nestedLookup() { + return nestedLookup; } }; } diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java index 75ba048379ee8..bc3c9fbbc6e70 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java @@ -87,8 +87,7 @@ public static GeoDistanceSortBuilder randomGeoDistanceSortBuilder() { result.validation(randomValueOtherThan(result.validation(), () -> randomFrom(GeoValidationMethod.values()))); } if (randomBoolean()) { - // don't fully randomize here, GeoDistanceSort is picky about the filters that are allowed - NestedSortBuilder nestedSort = new NestedSortBuilder(randomAlphaOfLengthBetween(3, 10)); + NestedSortBuilder nestedSort = new NestedSortBuilder("path"); nestedSort.setFilter(new MatchAllQueryBuilder()); result.setNestedSort(nestedSort); } @@ -386,7 +385,7 @@ public void testCommonCaseIsOptimized() throws IOException { assertEquals(SortField.class, sort.field.getClass()); // descending means the max value should be considered rather than min builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1)); - builder.setNestedSort(new NestedSortBuilder("some_nested_path")); + builder.setNestedSort(new NestedSortBuilder("path")); sort = builder.build(context); assertEquals(SortField.class, sort.field.getClass()); // can't use LatLon optimized sorting with nested fields diff --git a/server/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java index 270459db51ede..c6fa7ee82d55d 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java @@ -73,10 +73,10 @@ public void testFromXContent() throws IOException { } /** - * Create a {@link NestedSortBuilder} with random path and filter of the given depth. + * Create a {@link NestedSortBuilder} with random filter of the given depth. 
*/ public static NestedSortBuilder createRandomNestedSort(int depth) { - NestedSortBuilder nestedSort = new NestedSortBuilder(randomAlphaOfLengthBetween(3, 10)); + NestedSortBuilder nestedSort = new NestedSortBuilder("path"); if (randomBoolean()) { nestedSort.setFilter(AbstractSortTestCase.randomNestedFilter()); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index bc5ba1917b2c5..6f9e3649ae679 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -16,6 +16,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Accountable; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; @@ -41,6 +42,7 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -443,7 +445,7 @@ public BigArrays bigArrays() { } @Override - public ObjectMapper getObjectMapper(String path) { + public NestedLookup nestedLookup() { throw new UnsupportedOperationException(); } @@ -592,11 +594,17 @@ protected SearchExecutionContext createSearchExecutionContext(MapperService mapp IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); final SimilarityService similarityService = new SimilarityService(indexSettings, null, Map.of()); final long nowInMillis = randomNonNegativeLong(); - return new SearchExecutionContext( - 0, - 0, - indexSettings, - null, + return new SearchExecutionContext(0, 0, indexSettings, new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) { + + } + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) { + + } + }), (ft, idxName, lookup) -> ft.fielddataBuilder(idxName, lookup) .build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()), mapperService, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java index 69b5783c520b2..cd52d51e36a51 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.Rewriteable; @@ -170,8 +171,9 @@ private static void buildRoleQuery( failIfQueryUsesClient(queryBuilder, context); Query 
roleQuery = context.toQuery(queryBuilder).query(); filter.add(roleQuery, SHOULD); - if (context.hasNested()) { - NestedHelper nestedHelper = new NestedHelper(context::getObjectMapper, context::isFieldMapped); + NestedLookup nestedLookup = context.nestedLookup(); + if (nestedLookup != NestedLookup.EMPTY) { + NestedHelper nestedHelper = new NestedHelper(nestedLookup, context::isFieldMapped); if (nestedHelper.mightMatchNestedDocs(roleQuery)) { roleQuery = new BooleanQuery.Builder().add(roleQuery, FILTER).add(Queries.newNonNestedFilter(), FILTER).build(); } diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/mapper/DenseVectorFieldMapper.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/mapper/DenseVectorFieldMapper.java index ac6566a0188a2..c6da697d5ed6e 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/mapper/DenseVectorFieldMapper.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/mapper/DenseVectorFieldMapper.java @@ -502,7 +502,7 @@ public FieldMapper.Builder getMergeBuilder() { @Override public void doValidate(MappingLookup mappers) { - if (indexed && mappers.getNestedParent(name()) != null) { + if (indexed && mappers.nestedLookup().getNestedParent(name()) != null) { throw new IllegalArgumentException("[" + CONTENT_TYPE + "] fields cannot be indexed if they're" + " within [nested] mappings"); } } diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index 4189268897fec..77254a2ee89b9 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -59,6 +59,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.MapperTestCase; +import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.plugins.Plugin; @@ -1102,6 +1103,11 @@ protected final SearchExecutionContext createMockContext() { public MappedFieldType getFieldType(String name) { return provideMappedFieldType(name); } + + @Override + public NestedLookup nestedLookup() { + return NestedLookup.EMPTY; + } }; } From f2cb9100ffee6357f7cd64b65f611c0bab44a614 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 26 Jan 2022 14:36:15 +0100 Subject: [PATCH 041/100] Speed up MappingStats Computation on Coordinating Node (#82830) We can exploit the mapping deduplication logic to save deserializing the same mapping repeatedly here. This should fix extremely long-running computations when the cache needs to be refreshed for these stats in the common case of many duplicate mappings in a cluster. In a follow-up we can probably do the same for `AnalysisStats` as well.
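The counting pattern at the heart of this change can be sketched independently of the Elasticsearch classes. The following is an illustrative sketch only, not code from this patch: `Mapping` and `Index` are hypothetical stand-ins for `MappingMetadata` and `IndexMetadata`, and it assumes, as the patch does, that mapping deduplication makes indices with identical mappings share a single object instance.

import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical stand-ins for MappingMetadata and IndexMetadata; this assumes
// the cluster state interns equal mappings so duplicates share one instance.
record Mapping(List<String> fieldTypes) {}
record Index(String name, Mapping mapping) {}

class MappingStatsSketch {
    static Map<String, Integer> fieldTypeCounts(List<Index> indices) {
        // Identity-keyed map: duplicate mappings collapse onto a single entry
        // without comparing (let alone re-deserializing) their contents.
        Map<Mapping, Integer> perUniqueMapping = new IdentityHashMap<>();
        for (Index index : indices) {
            perUniqueMapping.merge(index.mapping(), 1, Integer::sum);
        }
        // Visit each unique mapping once and multiply its contribution by the
        // number of indices sharing it, instead of visiting every index.
        Map<String, Integer> counts = new HashMap<>();
        for (Map.Entry<Mapping, Integer> entry : perUniqueMapping.entrySet()) {
            for (String fieldType : entry.getKey().fieldTypes()) {
                counts.merge(fieldType, entry.getValue(), Integer::sum);
            }
        }
        return counts;
    }
}

A mapping shared by a thousand indices is then visited once and scaled by a count of 1000 rather than deserialized a thousand times; the `count` multiplier threaded through `FieldScriptStats.update` later in this patch serves the same purpose for the aggregated totals while leaving the `max*` fields per-mapping.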
--- docs/changelog/82830.yaml | 5 + .../admin/cluster/stats/ClusterStatsIT.java | 3 +- .../admin/cluster/stats/AnalysisStats.java | 47 ++-- .../admin/cluster/stats/FieldScriptStats.java | 10 +- .../admin/cluster/stats/MappingStats.java | 126 +++++----- .../cluster/metadata/Metadata.java | 2 +- .../cluster/stats/AnalysisStatsTests.java | 40 ++- .../cluster/stats/MappingStatsTests.java | 233 +++++++++++++----- 8 files changed, 308 insertions(+), 158 deletions(-) create mode 100644 docs/changelog/82830.yaml diff --git a/docs/changelog/82830.yaml b/docs/changelog/82830.yaml new file mode 100644 index 0000000000000..e35dda2a6a0d7 --- /dev/null +++ b/docs/changelog/82830.yaml @@ -0,0 +1,5 @@ +pr: 82830 +summary: Speed up `MappingStats` Computation on Coordinating Node +area: Stats +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 9d4b23e1206b4..0a315dc7b8f99 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -29,6 +29,7 @@ import java.io.IOException; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -276,7 +277,7 @@ public void testFieldTypes() { }""").get(); response = client().admin().cluster().prepareClusterStats().get(); assertThat(response.getIndicesStats().getMappings().getFieldTypeStats().size(), equalTo(3)); - Set stats = response.getIndicesStats().getMappings().getFieldTypeStats(); + List stats = response.getIndicesStats().getMappings().getFieldTypeStats(); for (FieldStats stat : stats) { if (stat.getName().equals("integer")) { assertThat(stat.getCount(), greaterThanOrEqualTo(1)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java index 172f0d935476d..ed8a30e4ba34d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java @@ -27,6 +27,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; +import java.util.IdentityHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -51,6 +52,7 @@ public static AnalysisStats of(Metadata metadata, Runnable ensureNotCancelled) { final Map usedBuiltInTokenFilters = new HashMap<>(); final Map usedBuiltInAnalyzers = new HashMap<>(); + final Map mappingCounts = new IdentityHashMap<>(metadata.getMappingsByHash().size()); for (IndexMetadata indexMetadata : metadata) { ensureNotCancelled.run(); if (indexMetadata.isSystem()) { @@ -58,23 +60,6 @@ public static AnalysisStats of(Metadata metadata, Runnable ensureNotCancelled) { // we care about the user's indices. 
continue; } - Set indexAnalyzers = new HashSet<>(); - MappingMetadata mappingMetadata = indexMetadata.mapping(); - if (mappingMetadata != null) { - MappingVisitor.visitMapping(mappingMetadata.getSourceAsMap(), (field, fieldMapping) -> { - for (String key : new String[] { "analyzer", "search_analyzer", "search_quote_analyzer" }) { - Object analyzerO = fieldMapping.get(key); - if (analyzerO != null) { - final String analyzer = analyzerO.toString(); - IndexFeatureStats stats = usedBuiltInAnalyzers.computeIfAbsent(analyzer, IndexFeatureStats::new); - stats.count++; - if (indexAnalyzers.add(analyzer)) { - stats.indexCount++; - } - } - } - }); - } Set indexCharFilters = new HashSet<>(); Set indexTokenizers = new HashSet<>(); @@ -133,7 +118,27 @@ public static AnalysisStats of(Metadata metadata, Runnable ensureNotCancelled) { Map tokenFilterSettings = indexSettings.getGroups("index.analysis.filter"); usedBuiltInTokenFilters.keySet().removeAll(tokenFilterSettings.keySet()); aggregateAnalysisTypes(tokenFilterSettings.values(), usedTokenFilterTypes, indexTokenFilterTypes); + countMapping(mappingCounts, indexMetadata); + } + for (Map.Entry mappingAndCount : mappingCounts.entrySet()) { + ensureNotCancelled.run(); + Set indexAnalyzers = new HashSet<>(); + final int count = mappingAndCount.getValue(); + MappingVisitor.visitMapping(mappingAndCount.getKey().getSourceAsMap(), (field, fieldMapping) -> { + for (String key : new String[] { "analyzer", "search_analyzer", "search_quote_analyzer" }) { + Object analyzerO = fieldMapping.get(key); + if (analyzerO != null) { + final String analyzer = analyzerO.toString(); + IndexFeatureStats stats = usedBuiltInAnalyzers.computeIfAbsent(analyzer, IndexFeatureStats::new); + stats.count += count; + if (indexAnalyzers.add(analyzer)) { + stats.indexCount += count; + } + } + } + }); } + return new AnalysisStats( usedCharFilterTypes.values(), usedTokenizerTypes.values(), @@ -146,6 +151,14 @@ public static AnalysisStats of(Metadata metadata, Runnable ensureNotCancelled) { ); } + public static void countMapping(Map mappingCounts, IndexMetadata indexMetadata) { + final MappingMetadata mappingMetadata = indexMetadata.mapping(); + if (mappingMetadata == null) { + return; + } + mappingCounts.compute(mappingMetadata, (k, count) -> count == null ? 
1 : count + 1); + } + private static void aggregateAnalysisTypes( Collection settings, Map stats, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/FieldScriptStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/FieldScriptStats.java index a8cef70072a78..d3ed4203cb2fd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/FieldScriptStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/FieldScriptStats.java @@ -68,14 +68,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - void update(int chars, long lines, int sourceUsages, int docUsages) { + void update(int chars, long lines, int sourceUsages, int docUsages, int count) { this.maxChars = Math.max(this.maxChars, chars); - this.totalChars += chars; + this.totalChars += (long) chars * count; this.maxLines = Math.max(this.maxLines, lines); - this.totalLines += lines; - this.totalSourceUsages += sourceUsages; + this.totalLines += lines * count; + this.totalSourceUsages += (long) sourceUsages * count; this.maxSourceUsages = Math.max(this.maxSourceUsages, sourceUsages); - this.totalDocUsages += docUsages; + this.totalDocUsages += (long) docUsages * count; this.maxDocUsages = Math.max(this.maxDocUsages, docUsages); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java index 0c4276833cb3c..ebcd664abbd61 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java @@ -25,7 +25,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashSet; +import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -48,85 +48,87 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { Map fieldTypes = new HashMap<>(); Set concreteFieldNames = new HashSet<>(); Map runtimeFieldTypes = new HashMap<>(); + final Map mappingCounts = new IdentityHashMap<>(metadata.getMappingsByHash().size()); for (IndexMetadata indexMetadata : metadata) { - ensureNotCancelled.run(); if (indexMetadata.isSystem()) { // Don't include system indices in statistics about mappings, // we care about the user's indices. 
continue; } + AnalysisStats.countMapping(mappingCounts, indexMetadata); + } + for (Map.Entry mappingAndCount : mappingCounts.entrySet()) { + ensureNotCancelled.run(); Set indexFieldTypes = new HashSet<>(); Set indexRuntimeFieldTypes = new HashSet<>(); - MappingMetadata mappingMetadata = indexMetadata.mapping(); - if (mappingMetadata != null) { - final Map map = mappingMetadata.getSourceAsMap(); - MappingVisitor.visitMapping(map, (field, fieldMapping) -> { - concreteFieldNames.add(field); - String type = null; - Object typeO = fieldMapping.get("type"); - if (typeO != null) { - type = typeO.toString(); - } else if (fieldMapping.containsKey("properties")) { - type = "object"; - } - if (type != null) { - FieldStats stats = fieldTypes.computeIfAbsent(type, FieldStats::new); - stats.count++; - if (indexFieldTypes.add(type)) { - stats.indexCount++; - } - Object scriptObject = fieldMapping.get("script"); - if (scriptObject instanceof Map script) { - Object sourceObject = script.get("source"); - stats.scriptCount++; - updateScriptParams(sourceObject, stats.fieldScriptStats); - Object langObject = script.get("lang"); - if (langObject != null) { - stats.scriptLangs.add(langObject.toString()); - } - } - } - }); - - MappingVisitor.visitRuntimeMapping(map, (field, fieldMapping) -> { - Object typeObject = fieldMapping.get("type"); - if (typeObject == null) { - return; - } - String type = typeObject.toString(); - RuntimeFieldStats stats = runtimeFieldTypes.computeIfAbsent(type, RuntimeFieldStats::new); - stats.count++; - if (indexRuntimeFieldTypes.add(type)) { - stats.indexCount++; - } - if (concreteFieldNames.contains(field)) { - stats.shadowedCount++; + final int count = mappingAndCount.getValue(); + final Map map = mappingAndCount.getKey().getSourceAsMap(); + MappingVisitor.visitMapping(map, (field, fieldMapping) -> { + concreteFieldNames.add(field); + String type = null; + Object typeO = fieldMapping.get("type"); + if (typeO != null) { + type = typeO.toString(); + } else if (fieldMapping.containsKey("properties")) { + type = "object"; + } + if (type != null) { + FieldStats stats = fieldTypes.computeIfAbsent(type, FieldStats::new); + stats.count += count; + if (indexFieldTypes.add(type)) { + stats.indexCount += count; } Object scriptObject = fieldMapping.get("script"); - if (scriptObject == null) { - stats.scriptLessCount++; - } else if (scriptObject instanceof Map script) { + if (scriptObject instanceof Map script) { Object sourceObject = script.get("source"); - updateScriptParams(sourceObject, stats.fieldScriptStats); + stats.scriptCount += count; + updateScriptParams(sourceObject, stats.fieldScriptStats, count); Object langObject = script.get("lang"); if (langObject != null) { stats.scriptLangs.add(langObject.toString()); } } - }); - } + } + }); + + MappingVisitor.visitRuntimeMapping(map, (field, fieldMapping) -> { + Object typeObject = fieldMapping.get("type"); + if (typeObject == null) { + return; + } + String type = typeObject.toString(); + RuntimeFieldStats stats = runtimeFieldTypes.computeIfAbsent(type, RuntimeFieldStats::new); + stats.count += count; + if (indexRuntimeFieldTypes.add(type)) { + stats.indexCount += count; + } + if (concreteFieldNames.contains(field)) { + stats.shadowedCount += count; + } + Object scriptObject = fieldMapping.get("script"); + if (scriptObject == null) { + stats.scriptLessCount += count; + } else if (scriptObject instanceof Map script) { + Object sourceObject = script.get("source"); + updateScriptParams(sourceObject, stats.fieldScriptStats, count); + Object 
langObject = script.get("lang"); + if (langObject != null) { + stats.scriptLangs.add(langObject.toString()); + } + } + }); } return new MappingStats(fieldTypes.values(), runtimeFieldTypes.values()); } - private static void updateScriptParams(Object scriptSourceObject, FieldScriptStats scriptStats) { + private static void updateScriptParams(Object scriptSourceObject, FieldScriptStats scriptStats, int multiplier) { if (scriptSourceObject != null) { String scriptSource = scriptSourceObject.toString(); int chars = scriptSource.length(); long lines = scriptSource.lines().count(); int docUsages = countOccurrences(scriptSource, DOC_PATTERN); int sourceUsages = countOccurrences(scriptSource, SOURCE_PATTERN); - scriptStats.update(chars, lines, sourceUsages, docUsages); + scriptStats.update(chars, lines, sourceUsages, docUsages, multiplier); } } @@ -139,21 +141,21 @@ private static int countOccurrences(String script, Pattern pattern) { return occurrences; } - private final Set fieldTypeStats; - private final Set runtimeFieldStats; + private final List fieldTypeStats; + private final List runtimeFieldStats; MappingStats(Collection fieldTypeStats, Collection runtimeFieldStats) { List stats = new ArrayList<>(fieldTypeStats); stats.sort(Comparator.comparing(IndexFeatureStats::getName)); - this.fieldTypeStats = Collections.unmodifiableSet(new LinkedHashSet<>(stats)); + this.fieldTypeStats = Collections.unmodifiableList(stats); List runtimeStats = new ArrayList<>(runtimeFieldStats); runtimeStats.sort(Comparator.comparing(RuntimeFieldStats::type)); - this.runtimeFieldStats = Collections.unmodifiableSet(new LinkedHashSet<>(runtimeStats)); + this.runtimeFieldStats = Collections.unmodifiableList(runtimeStats); } MappingStats(StreamInput in) throws IOException { - fieldTypeStats = Collections.unmodifiableSet(new LinkedHashSet<>(in.readList(FieldStats::new))); - runtimeFieldStats = Collections.unmodifiableSet(new LinkedHashSet<>(in.readList(RuntimeFieldStats::new))); + fieldTypeStats = Collections.unmodifiableList(in.readList(FieldStats::new)); + runtimeFieldStats = Collections.unmodifiableList(in.readList(RuntimeFieldStats::new)); } @Override @@ -165,14 +167,14 @@ public void writeTo(StreamOutput out) throws IOException { /** * Return stats about field types. */ - public Set getFieldTypeStats() { + public List getFieldTypeStats() { return fieldTypeStats; } /** * Return stats about runtime field types. 
*/ - public Set getRuntimeFieldStats() { + public List getRuntimeFieldStats() { return runtimeFieldStats; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 836a2ffba5c74..25ddbbccd9c3a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -949,7 +949,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - Map getMappingsByHash() { + public Map getMappingsByHash() { return mappingsByHash; } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStatsTests.java index 6c3c81eb45dd5..66cf0ccdc6a14 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStatsTests.java @@ -232,13 +232,39 @@ public void testAccountsRegularIndices() { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 4) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .build(); - IndexMetadata.Builder indexMetadata = new IndexMetadata.Builder("foo").settings(settings).putMapping(mapping); - Metadata metadata = new Metadata.Builder().put(indexMetadata).build(); - AnalysisStats analysisStats = AnalysisStats.of(metadata, () -> {}); - IndexFeatureStats expectedStats = new IndexFeatureStats("german"); - expectedStats.count = 1; - expectedStats.indexCount = 1; - assertEquals(Collections.singleton(expectedStats), analysisStats.getUsedBuiltInAnalyzers()); + Metadata metadata = new Metadata.Builder().put(new IndexMetadata.Builder("foo").settings(settings).putMapping(mapping)).build(); + { + AnalysisStats analysisStats = AnalysisStats.of(metadata, () -> {}); + IndexFeatureStats expectedStats = new IndexFeatureStats("german"); + expectedStats.count = 1; + expectedStats.indexCount = 1; + assertEquals(Collections.singleton(expectedStats), analysisStats.getUsedBuiltInAnalyzers()); + } + + Metadata metadata2 = Metadata.builder(metadata) + .put(new IndexMetadata.Builder("bar").settings(settings).putMapping(mapping)) + .build(); + { + AnalysisStats analysisStats = AnalysisStats.of(metadata2, () -> {}); + IndexFeatureStats expectedStats = new IndexFeatureStats("german"); + expectedStats.count = 2; + expectedStats.indexCount = 2; + assertEquals(Collections.singleton(expectedStats), analysisStats.getUsedBuiltInAnalyzers()); + } + + Metadata metadata3 = Metadata.builder(metadata2).put(new IndexMetadata.Builder("baz").settings(settings).putMapping(""" + {"properties":{"bar1":{"type":"text","analyzer":"french"}, + "bar2":{"type":"text","analyzer":"french"},"bar3":{"type":"text","analyzer":"french"}}}""")).build(); + { + AnalysisStats analysisStats = AnalysisStats.of(metadata3, () -> {}); + IndexFeatureStats expectedStatsGerman = new IndexFeatureStats("german"); + expectedStatsGerman.count = 2; + expectedStatsGerman.indexCount = 2; + IndexFeatureStats expectedStatsFrench = new IndexFeatureStats("french"); + expectedStatsFrench.count = 3; + expectedStatsFrench.indexCount = 1; + assertEquals(Set.of(expectedStatsGerman, expectedStatsFrench), analysisStats.getUsedBuiltInAnalyzers()); + } } public void testIgnoreSystemIndices() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java index a639d628536bf..6f5c53b199095 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.VersionUtils; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.ArrayList; @@ -29,73 +30,70 @@ public class MappingStatsTests extends AbstractWireSerializingTestCase { - public void testToXContent() { - Settings settings = Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", 1) - .put("index.version.created", Version.CURRENT) - .build(); - Script script1 = new Script("doc['field'] + doc.field + params._source.field"); - Script script2 = new Script("doc['field']"); - Script script3 = new Script("params._source.field + params._source.field \n + params._source.field"); - Script script4 = new Script("params._source.field"); - String mapping = """ - { - "runtime": { - "keyword1": { - "type": "keyword", - "script": %s - }, - "keyword2": { - "type": "keyword" - }, - "object.keyword3": { - "type": "keyword", - "script": %s - }, - "long": { - "type": "long", - "script": %s - }, - "long2": { - "type": "long", - "script": %s - } + private static final Settings SINGLE_SHARD_NO_REPLICAS = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 1) + .put("index.version.created", Version.CURRENT) + .build(); + + public static final String MAPPING_TEMPLATE = """ + { + "runtime": { + "keyword1": { + "type": "keyword", + "script": %s + }, + "keyword2": { + "type": "keyword" + }, + "object.keyword3": { + "type": "keyword", + "script": %s }, - "properties": { - "object": { - "type": "object", - "properties": { - "keyword3": { - "type": "keyword" - } + "long": { + "type": "long", + "script": %s + }, + "long2": { + "type": "long", + "script": %s + } + }, + "properties": { + "object": { + "type": "object", + "properties": { + "keyword3": { + "type": "keyword" } - }, - "long3": { - "type": "long", - "script": %s - }, - "long4": { - "type": "long", - "script": %s - }, - "keyword3": { - "type": "keyword", - "script": %s } + }, + "long3": { + "type": "long", + "script": %s + }, + "long4": { + "type": "long", + "script": %s + }, + "keyword3": { + "type": "keyword", + "script": %s } - }""".formatted( - Strings.toString(script1), - Strings.toString(script2), - Strings.toString(script3), - Strings.toString(script4), - Strings.toString(script3), - Strings.toString(script4), - Strings.toString(script1) - ); - IndexMetadata meta = IndexMetadata.builder("index").settings(settings).putMapping(mapping).build(); - IndexMetadata meta2 = IndexMetadata.builder("index2").settings(settings).putMapping(mapping).build(); + } + }"""; + + private static final String SCRIPT_1 = scriptAsJSON("doc['field'] + doc.field + params._source.field"); + private static final String SCRIPT_2 = scriptAsJSON("doc['field']"); + private static final String SCRIPT_3 = scriptAsJSON("params._source.field + params._source.field \n + params._source.field"); + private static final String SCRIPT_4 = scriptAsJSON("params._source.field"); + + public void testToXContent() { + String mapping = MAPPING_TEMPLATE.formatted(SCRIPT_1, SCRIPT_2, SCRIPT_3, SCRIPT_4, SCRIPT_3, SCRIPT_4, 
SCRIPT_1); + IndexMetadata meta = IndexMetadata.builder("index").settings(SINGLE_SHARD_NO_REPLICAS).putMapping(mapping).build(); + IndexMetadata meta2 = IndexMetadata.builder("index2").settings(SINGLE_SHARD_NO_REPLICAS).putMapping(mapping).build(); Metadata metadata = Metadata.builder().put(meta, false).put(meta2, false).build(); + assertThat(metadata.getMappingsByHash(), Matchers.aMapWithSize(1)); MappingStats mappingStats = MappingStats.of(metadata, () -> {}); assertEquals(""" { @@ -184,6 +182,109 @@ public void testToXContent() { }""", Strings.toString(mappingStats, true, true)); } + public void testToXContentWithSomeSharedMappings() { + IndexMetadata meta = IndexMetadata.builder("index") + .settings(SINGLE_SHARD_NO_REPLICAS) + .putMapping(MAPPING_TEMPLATE.formatted(SCRIPT_1, SCRIPT_2, SCRIPT_3, SCRIPT_4, SCRIPT_3, SCRIPT_4, SCRIPT_1)) + .build(); + // make mappings that are slightly different because we shuffled 2 scripts between fields + final String mappingString2 = MAPPING_TEMPLATE.formatted(SCRIPT_1, SCRIPT_2, SCRIPT_3, SCRIPT_4, SCRIPT_4, SCRIPT_3, SCRIPT_1); + IndexMetadata meta2 = IndexMetadata.builder("index2").settings(SINGLE_SHARD_NO_REPLICAS).putMapping(mappingString2).build(); + IndexMetadata meta3 = IndexMetadata.builder("index3").settings(SINGLE_SHARD_NO_REPLICAS).putMapping(mappingString2).build(); + Metadata metadata = Metadata.builder().put(meta, false).put(meta2, false).put(meta3, false).build(); + assertThat(metadata.getMappingsByHash(), Matchers.aMapWithSize(2)); + MappingStats mappingStats = MappingStats.of(metadata, () -> {}); + assertEquals(""" + { + "mappings" : { + "field_types" : [ + { + "name" : "keyword", + "count" : 6, + "index_count" : 3, + "script_count" : 3, + "lang" : [ + "painless" + ], + "lines_max" : 1, + "lines_total" : 3, + "chars_max" : 47, + "chars_total" : 141, + "source_max" : 1, + "source_total" : 3, + "doc_max" : 2, + "doc_total" : 6 + }, + { + "name" : "long", + "count" : 6, + "index_count" : 3, + "script_count" : 6, + "lang" : [ + "painless" + ], + "lines_max" : 2, + "lines_total" : 9, + "chars_max" : 68, + "chars_total" : 264, + "source_max" : 3, + "source_total" : 12, + "doc_max" : 0, + "doc_total" : 0 + }, + { + "name" : "object", + "count" : 3, + "index_count" : 3, + "script_count" : 0 + } + ], + "runtime_field_types" : [ + { + "name" : "keyword", + "count" : 9, + "index_count" : 3, + "scriptless_count" : 3, + "shadowed_count" : 3, + "lang" : [ + "painless" + ], + "lines_max" : 1, + "lines_total" : 6, + "chars_max" : 47, + "chars_total" : 177, + "source_max" : 1, + "source_total" : 3, + "doc_max" : 2, + "doc_total" : 9 + }, + { + "name" : "long", + "count" : 6, + "index_count" : 3, + "scriptless_count" : 0, + "shadowed_count" : 0, + "lang" : [ + "painless" + ], + "lines_max" : 2, + "lines_total" : 9, + "chars_max" : 68, + "chars_total" : 264, + "source_max" : 3, + "source_total" : 12, + "doc_max" : 0, + "doc_total" : 0 + } + ] + } + }""", Strings.toString(mappingStats, true, true)); + } + + private static String scriptAsJSON(String script) { + return Strings.toString(new Script(script)); + } + @Override protected Reader instanceReader() { return MappingStats::new; @@ -219,6 +320,7 @@ private static FieldStats randomFieldStats(String type) { randomIntBetween(1, 100), randomLongBetween(100, 1000), randomIntBetween(1, 10), + randomIntBetween(1, 10), randomIntBetween(1, 10) ); } @@ -237,6 +339,7 @@ private static RuntimeFieldStats randomRuntimeFieldStats(String type) { randomIntBetween(1, 100), randomLongBetween(100, 1000), 
randomIntBetween(1, 10), + randomIntBetween(1, 10), randomIntBetween(1, 10) ); } @@ -285,7 +388,7 @@ public void testAccountsRegularIndices() { FieldStats expectedStats = new FieldStats("long"); expectedStats.count = 1; expectedStats.indexCount = 1; - assertEquals(Collections.singleton(expectedStats), mappingStats.getFieldTypeStats()); + assertEquals(Collections.singletonList(expectedStats), mappingStats.getFieldTypeStats()); } public void testIgnoreSystemIndices() { @@ -299,7 +402,7 @@ public void testIgnoreSystemIndices() { IndexMetadata.Builder indexMetadata = new IndexMetadata.Builder("foo").settings(settings).putMapping(mapping).system(true); Metadata metadata = new Metadata.Builder().put(indexMetadata).build(); MappingStats mappingStats = MappingStats.of(metadata, () -> {}); - assertEquals(Collections.emptySet(), mappingStats.getFieldTypeStats()); + assertEquals(Collections.emptyList(), mappingStats.getFieldTypeStats()); } public void testChecksForCancellation() { @@ -308,7 +411,7 @@ public void testChecksForCancellation() { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 4) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .build(); - IndexMetadata.Builder indexMetadata = new IndexMetadata.Builder("foo").settings(settings); + IndexMetadata.Builder indexMetadata = new IndexMetadata.Builder("foo").settings(settings).putMapping("{}"); Metadata metadata = new Metadata.Builder().put(indexMetadata).build(); expectThrows( TaskCancelledException.class, From 0f97a85254719096e31bf9ab419c2a80f14c23d2 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 26 Jan 2022 15:09:56 +0100 Subject: [PATCH 042/100] Remove assertion about GCS credentials always being null when not set explicitly (#83139) After we fixed getting application credentials in a GCE environment in #82974, we can actually get credentials set automatically when creating a new GCS client. Fixes #83131 --- .../repositories/gcs/GoogleCloudStorageServiceTests.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index f37dac8fb7db2..5531bef3a6af7 100644 --- a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.repositories.gcs; -import com.google.auth.Credentials; import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.Storage; @@ -35,7 +34,6 @@ public class GoogleCloudStorageServiceTests extends ESTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/83131") public void testClientInitializer() throws Exception { final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); @@ -95,7 +93,6 @@ void notifyProxyIsSet(Proxy p) { ((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(), Matchers.is((int) readTimeValue.millis()) ); - assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class)); assertThat(proxy.get().toString(), equalTo("HTTP @ /192.168.52.15:8080")); } From 7f321fb9a25550f2dab1571c9ef87cf65d7d4890 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 26 Jan
2022 16:12:19 +0100 Subject: [PATCH 043/100] Make ValidateJsonNoKeywordsTask gradle configuration cache ready (#82864) Generate mapper as part of task action to avoid early overhead if not used and make this configuration cache compliant --- .../internal/precommit/ValidateJsonNoKeywordsTask.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateJsonNoKeywordsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateJsonNoKeywordsTask.java index 72f7420a720bc..95f7093bad0a0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateJsonNoKeywordsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateJsonNoKeywordsTask.java @@ -47,7 +47,6 @@ */ public class ValidateJsonNoKeywordsTask extends DefaultTask { - private final ObjectMapper mapper = new ObjectMapper().configure(JsonParser.Feature.ALLOW_COMMENTS, true); private File jsonKeywords; private File report; private FileCollection inputFiles; @@ -82,11 +81,12 @@ public File getReport() { @TaskAction public void validate(InputChanges inputChanges) { + final ObjectMapper mapper = new ObjectMapper().configure(JsonParser.Feature.ALLOW_COMMENTS, true); final Map> errors = new LinkedHashMap<>(); getLogger().debug("Loading keywords from {}", jsonKeywords.getName()); - final Map> languagesByKeyword = loadKeywords(); + final Map> languagesByKeyword = loadKeywords(mapper); // incrementally evaluate input files StreamSupport.stream(inputChanges.getFileChanges(getInputFiles()).spliterator(), false) @@ -173,7 +173,7 @@ public void validate(InputChanges inputChanges) { * * @return a mapping from keyword to languages. */ - private Map> loadKeywords() { + private Map> loadKeywords(ObjectMapper mapper) { Map> languagesByKeyword = new HashMap<>(); try { From 40cab1739a45fcb3e229770110d6c17d5c95cee0 Mon Sep 17 00:00:00 2001 From: Christos Soulios <1561376+csoulios@users.noreply.github.com> Date: Wed, 26 Jan 2022 18:03:12 +0200 Subject: [PATCH 044/100] TSDB: Change `_tsid` field to SortedDocValuesField (#83045) Since _tsid cannot be a multi-value field, this PR modifies the TimeSeriesIdFieldMapper so that _tsid is added as a SortedDocValuesField (instead of a SortedSetDocValuesField) Relates to #80276 --- .../plain/SortedOrdinalsIndexFieldData.java | 105 ++++++++++++++++++ .../index/mapper/TimeSeriesIdFieldMapper.java | 8 +- .../timeseries/TimeSeriesIndexSearcher.java | 11 +- .../timeseries/TimeSeriesAggregatorTests.java | 4 +- .../aggregations/AggregatorTestCase.java | 6 +- 5 files changed, 119 insertions(+), 15 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedOrdinalsIndexFieldData.java diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedOrdinalsIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedOrdinalsIndexFieldData.java new file mode 100644 index 0000000000000..46bbea8320097 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedOrdinalsIndexFieldData.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.fielddata.plain; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.OrdinalMap; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSortField; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; +import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.fielddata.LeafOrdinalsFieldData; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.script.field.ToScriptField; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.MultiValueMode; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.SortOrder; + +import static org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.sortMissingLast; + +public class SortedOrdinalsIndexFieldData extends AbstractIndexOrdinalsFieldData { + + public static class Builder implements IndexFieldData.Builder { + private final String name; + private final ToScriptField toScriptField; + private final ValuesSourceType valuesSourceType; + + public Builder(String name, ValuesSourceType valuesSourceType, ToScriptField toScriptField) { + this.name = name; + this.toScriptField = toScriptField; + this.valuesSourceType = valuesSourceType; + } + + @Override + public SortedOrdinalsIndexFieldData build(IndexFieldDataCache cache, CircuitBreakerService breakerService) { + return new SortedOrdinalsIndexFieldData(cache, name, valuesSourceType, breakerService, toScriptField); + } + } + + public SortedOrdinalsIndexFieldData( + IndexFieldDataCache cache, + String fieldName, + ValuesSourceType valuesSourceType, + CircuitBreakerService breakerService, + ToScriptField toScriptField + ) { + super(fieldName, valuesSourceType, cache, breakerService, toScriptField); + } + + @Override + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + SortField sortField = new SortField(getFieldName(), SortField.Type.STRING, reverse); + sortField.setMissingValue( + sortMissingLast(missingValue) ^ reverse ? SortedSetSortField.STRING_LAST : SortedSetSortField.STRING_FIRST + ); + return sortField; + } + + @Override + public BucketedSort newBucketedSort( + BigArrays bigArrays, + Object missingValue, + MultiValueMode sortMode, + Nested nested, + SortOrder sortOrder, + DocValueFormat format, + int bucketSize, + BucketedSort.ExtraData extra + ) { + throw new IllegalArgumentException("only supported on numeric fields"); + } + + @Override + public LeafOrdinalsFieldData load(LeafReaderContext context) { + // Doc value fields are loaded using Lucene's DocValues#getSortedSet + // that can happily load SortedDocValues as well. 
+ return new SortedSetBytesLeafFieldData(context.reader(), getFieldName(), toScriptField); + } + + @Override + public LeafOrdinalsFieldData loadDirect(LeafReaderContext context) { + return load(context); + } + + @Override + public OrdinalMap getOrdinalMap() { + return null; + } + + @Override + public boolean supportsGlobalOrdinalsMapping() { + return true; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index 07be000c8711b..160b3732d03d3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.mapper; -import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.search.Query; import org.apache.lucene.util.ByteBlockPool; import org.apache.lucene.util.BytesRef; @@ -21,7 +21,7 @@ import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; -import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; +import org.elasticsearch.index.fielddata.plain.SortedOrdinalsIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.field.DelegateDocValuesField; import org.elasticsearch.search.DocValueFormat; @@ -116,7 +116,7 @@ public DocValueFormat docValueFormat(String format, ZoneId timeZone) { public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { failIfNoDocValues(); // TODO don't leak the TSID's binary format into the script - return new SortedSetOrdinalsIndexFieldData.Builder( + return new SortedOrdinalsIndexFieldData.Builder( name(), CoreValuesSourceType.KEYWORD, (dv, n) -> new DelegateDocValuesField( @@ -143,7 +143,7 @@ public void postParse(DocumentParserContext context) throws IOException { // SortedMap is expected to be sorted by key (field name) SortedMap dimensionFields = context.doc().getDimensionBytes(); BytesReference timeSeriesId = buildTsidField(dimensionFields); - context.doc().add(new SortedSetDocValuesField(fieldType().name(), timeSeriesId.toBytesRef())); + context.doc().add(new SortedDocValuesField(fieldType().name(), timeSeriesId.toBytesRef())); } public static BytesReference buildTsidField(SortedMap dimensionFields) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesIndexSearcher.java index d9ba622e45176..327da1340ec04 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesIndexSearcher.java @@ -8,9 +8,10 @@ package org.elasticsearch.search.aggregations.timeseries; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; @@ -80,7 +81,7 @@ private 
static class LeafWalker { private final LeafCollector collector; private final Bits liveDocs; private final DocIdSetIterator iterator; - private final SortedSetDocValues tsids; + private final SortedDocValues tsids; private final SortedNumericDocValues timestamps; final int docBase; int docId; @@ -93,8 +94,8 @@ private static class LeafWalker { this.collector.setScorer(scorer); iterator = scorer.iterator(); docBase = context.docBase; - tsids = context.reader().getSortedSetDocValues(TimeSeriesIdFieldMapper.NAME); - timestamps = context.reader().getSortedNumericDocValues(DataStream.TimestampField.FIXED_TIMESTAMP_FIELD); + tsids = DocValues.getSorted(context.reader(), TimeSeriesIdFieldMapper.NAME); + timestamps = DocValues.getSortedNumeric(context.reader(), DataStream.TimestampField.FIXED_TIMESTAMP_FIELD); } void collectCurrent() throws IOException { @@ -106,7 +107,7 @@ boolean next() throws IOException { docId = iterator.nextDoc(); if (docId != DocIdSetIterator.NO_MORE_DOCS && (liveDocs == null || liveDocs.get(docId))) { if (tsids.advanceExact(docId)) { - BytesRef tsid = tsids.lookupOrd(tsids.nextOrd()); + BytesRef tsid = tsids.lookupOrd(tsids.ordValue()); if (timestamps.advanceExact(docId)) { this.timestamp = timestamps.nextValue(); if (tsid.equals(this.tsid) == false) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesAggregatorTests.java index 4e373f04eb732..113cfca21d16f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesAggregatorTests.java @@ -11,8 +11,8 @@ import org.apache.lucene.document.DoubleDocValuesField; import org.apache.lucene.document.FloatDocValuesField; import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.MatchAllDocsQuery; @@ -102,7 +102,7 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens try (BytesStreamOutput out = new BytesStreamOutput()) { TimeSeriesIdFieldMapper.encodeTsid(out, dimensionFields); BytesReference timeSeriesId = out.bytes(); - fields.add(new SortedSetDocValuesField(TimeSeriesIdFieldMapper.NAME, timeSeriesId.toBytesRef())); + fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, timeSeriesId.toBytesRef())); } // TODO: Handle metrics iw.addDocument(fields.stream().toList()); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 9315b81bd09b1..52a3263c4baec 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -37,8 +37,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSelector; -import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.Weight; import 
org.apache.lucene.store.Directory; import org.apache.lucene.util.Accountable; @@ -664,8 +662,8 @@ protected void tes IndexWriterConfig config = LuceneTestCase.newIndexWriterConfig(random(), new MockAnalyzer(random())); if (timeSeries) { Sort sort = new Sort( - new SortedSetSortField(TimeSeriesIdFieldMapper.NAME, false, SortedSetSelector.Type.MAX), - new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG) + new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false), + new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true) ); config.setIndexSort(sort); } From 63f228e24e14d92bdc8a27a96e381ea1f431d192 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Wed, 26 Jan 2022 11:07:59 -0500 Subject: [PATCH 045/100] [DOCS] Re-add paragraph noting `doc_count` is approximate (#83154) This paragraph was accidentally removed as part of #79205. Also fixes a minor heading capitalization error. --- .../aggregations/bucket/terms-aggregation.asciidoc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index 465acde1a1ab2..152c8d3b236e6 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -129,7 +129,7 @@ aggregation close to the `max_buckets` limit. You'll know you've gone too large if the request fails with a message about `max_buckets`. [[search-aggregations-bucket-terms-aggregation-shard-size]] -==== Shard Size +==== Shard size To get more accurate results, the `terms` agg fetches more than the top `size` terms from each shard. It fetches the top `shard_size` terms, @@ -157,6 +157,10 @@ NOTE: `shard_size` cannot be smaller than `size` (as it doesn't make much sens [[terms-agg-doc-count-error]] ==== Document count error +Even with a larger `shard_size` value, `doc_count` values for a `terms` +aggregation may be approximate. As a result, any sub-aggregations on the `terms` +aggregation may also be approximate. + `sum_other_doc_count` is the number of documents that didn't make it into the the top `size` terms. If this is greater than `0`, you can be sure that the `terms` agg had to throw away some buckets, either because they didn't fit into From 4bf9c534bcc77861f32d02d67b1238dc9f33be14 Mon Sep 17 00:00:00 2001 From: Dan Roscigno Date: Wed, 26 Jan 2022 11:12:29 -0500 Subject: [PATCH 046/100] Minor typo, attribute -> attributes (#83150) In the SAML configuration for Elasticsearch the settings for `attributes.principal` and `.groups` are listed in the detail as `attribute.`, missing `s` --- x-pack/docs/en/security/authentication/saml-guide.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index 9654e43ca327c..e4f6bc23c1543 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -190,8 +190,8 @@ sp.logout:: proxies involved, but it will typically be +$\{kibana-url}/logout+ where _$\{kibana-url}_ is the base URL for your {kib} instance. -attribute.principal:: See <>. -attribute.groups:: See <>. +attributes.principal:: See <>. +attributes.groups:: See <>. 
[[saml-attributes-mapping]] ==== Attribute mapping From f5998468874ba7ea954f3ceb620411fc380a9401 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 26 Jan 2022 16:27:35 +0000 Subject: [PATCH 047/100] Correct context for batched reroute notifications (#83019) Today the `BatchedRerouteService` completes its listeners in the thread context of the `reroute()` call that actually triggered the reroute, which will be the correct context only if no batching took place. With this commit we make sure to complete each listener in the context in which it was passed to the corresponding `reroute()` call. --- docs/changelog/83019.yaml | 5 +++++ .../cluster/routing/BatchedRerouteService.java | 11 ++++++++--- .../routing/BatchedRerouteServiceTests.java | 18 ++++++++++++++---- 3 files changed, 27 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/83019.yaml diff --git a/docs/changelog/83019.yaml b/docs/changelog/83019.yaml new file mode 100644 index 0000000000000..413bf08335980 --- /dev/null +++ b/docs/changelog/83019.yaml @@ -0,0 +1,5 @@ +pr: 83019 +summary: Correct context for batched reroute notifications +area: Allocation +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java b/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java index 204900c173ff1..261dd564d6f9e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java @@ -13,6 +13,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -56,6 +57,10 @@ public BatchedRerouteService(ClusterService clusterService, BiFunction listener) { + final ActionListener wrappedListener = ContextPreservingActionListener.wrapPreservingContext( + listener, + clusterService.getClusterApplierService().threadPool().getThreadContext() + ); final List> currentListeners; synchronized (mutex) { if (pendingRerouteListeners != null) { @@ -66,7 +71,7 @@ public final void reroute(String reason, Priority priority, ActionListener(1 + pendingRerouteListeners.size()); - currentListeners.add(listener); + currentListeners.add(wrappedListener); currentListeners.addAll(pendingRerouteListeners); pendingRerouteListeners.clear(); pendingRerouteListeners = currentListeners; @@ -85,7 +90,7 @@ public final void reroute(String reason, Priority priority, ActionListener(1); - currentListeners.add(listener); + currentListeners.add(wrappedListener); pendingRerouteListeners = currentListeners; pendingTaskPriority = priority; } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java index 67b6082550479..ca5623f8225e4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Randomness; +import 
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -35,6 +36,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; public class BatchedRerouteServiceTests extends ESTestCase { @@ -100,16 +102,24 @@ public void onFailure(Exception e) { return s; }); + final ThreadContext threadContext = threadPool.getThreadContext(); + final String contextHeader = "test-context-header"; + final int iterations = scaledRandomIntBetween(1, 100); final CountDownLatch tasksSubmittedCountDown = new CountDownLatch(iterations); final CountDownLatch tasksCompletedCountDown = new CountDownLatch(iterations); final List actions = new ArrayList<>(iterations); final Function rerouteFromPriority = priority -> () -> { final AtomicBoolean alreadyRun = new AtomicBoolean(); - batchedRerouteService.reroute("reroute at " + priority, priority, ActionListener.wrap(() -> { - assertTrue(alreadyRun.compareAndSet(false, true)); - tasksCompletedCountDown.countDown(); - })); + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + final String contextValue = randomAlphaOfLength(10); + threadContext.putHeader(contextHeader, contextValue); + batchedRerouteService.reroute("reroute at " + priority, priority, ActionListener.wrap(() -> { + assertTrue(alreadyRun.compareAndSet(false, true)); + assertThat(threadContext.getHeader(contextHeader), equalTo(contextValue)); + tasksCompletedCountDown.countDown(); + })); + } tasksSubmittedCountDown.countDown(); }; actions.add(rerouteFromPriority.apply(Priority.URGENT)); // ensure at least one URGENT priority reroute From 5c684c3e14bf62e3114a33b7836e1b5e3e7eb335 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 26 Jan 2022 16:40:30 +0000 Subject: [PATCH 048/100] Correct context for CancellableSOCache listener (#83021) Today the `CancellableSingleObjectCache` completes its listeners in the thread context of the `get()` call that actually computes the value, which will be the correct context only if no batching took place. With this commit we make sure to complete each listener in the context in which it was passed to the corresponding `get()` call.
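Both this fix and the `BatchedRerouteService` fix in the previous patch apply the same principle: a listener must be wrapped with its caller's thread context at the moment it is enqueued, because the batch is later completed in whichever context happened to trigger the underlying computation. What follows is a rough sketch of that capture-and-restore pattern, using a plain `ThreadLocal` as a hypothetical stand-in for `ThreadContext`; it illustrates the idea and is not the actual `ContextPreservingActionListener` implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

class ContextPreservationSketch {
    // Hypothetical per-thread context, standing in for ThreadContext headers.
    static final ThreadLocal<String> CONTEXT = ThreadLocal.withInitial(() -> "");

    // Capture the enqueuing caller's context now; restore it around the
    // callback when the batch eventually completes on some other caller's thread.
    static <T> Consumer<T> preservingContext(Consumer<T> listener) {
        final String captured = CONTEXT.get();
        return value -> {
            final String previous = CONTEXT.get();
            CONTEXT.set(captured); // complete the listener in its owner's context
            try {
                listener.accept(value);
            } finally {
                CONTEXT.set(previous); // restore the completing thread's context
            }
        };
    }

    private final List<Consumer<String>> pending = new ArrayList<>();

    // Wrap at enqueue time, not at completion time.
    void enqueue(Consumer<String> listener) {
        pending.add(preservingContext(listener));
    }

    // Runs in whatever context triggered the batch; each listener still sees
    // the context it was enqueued under.
    void completeBatch(String result) {
        pending.forEach(l -> l.accept(result));
        pending.clear();
    }
}

In the real fixes this wrapping is done with `ContextPreservingActionListener.wrapPreservingContext(listener, threadContext)` inside `reroute()` (previous patch) and with the `ThreadContext` handed to the cache's constructor, as the diff below shows.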
--- docs/changelog/83021.yaml | 5 +++ .../stats/TransportClusterStatsAction.java | 10 ++++-- .../util/CancellableSingleObjectCache.java | 10 +++++- .../CancellableSingleObjectCacheTests.java | 31 ++++++++++++++----- 4 files changed, 44 insertions(+), 12 deletions(-) create mode 100644 docs/changelog/83021.yaml diff --git a/docs/changelog/83021.yaml b/docs/changelog/83021.yaml new file mode 100644 index 0000000000000..c98e6f1b5aeee --- /dev/null +++ b/docs/changelog/83021.yaml @@ -0,0 +1,5 @@ +pr: 83021 +summary: Correct context for CancellableSOCache listener +area: Stats +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index cf98957d4935d..dc4673b2ea561 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.CancellableSingleObjectCache; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.seqno.RetentionLeaseStats; @@ -69,8 +70,8 @@ public class TransportClusterStatsAction extends TransportNodesAction< private final NodeService nodeService; private final IndicesService indicesService; - private final MetadataStatsCache mappingStatsCache = new MetadataStatsCache<>(MappingStats::of); - private final MetadataStatsCache analysisStatsCache = new MetadataStatsCache<>(AnalysisStats::of); + private final MetadataStatsCache mappingStatsCache; + private final MetadataStatsCache analysisStatsCache; @Inject public TransportClusterStatsAction( @@ -95,6 +96,8 @@ public TransportClusterStatsAction( ); this.nodeService = nodeService; this.indicesService = indicesService; + this.mappingStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), MappingStats::of); + this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of); } @Override @@ -258,7 +261,8 @@ public void writeTo(StreamOutput out) throws IOException { private static class MetadataStatsCache extends CancellableSingleObjectCache { private final BiFunction function; - MetadataStatsCache(BiFunction function) { + MetadataStatsCache(ThreadContext threadContext, BiFunction function) { + super(threadContext); this.function = function; } diff --git a/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java b/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java index 8e3add3e54bfd..c09d00cc1961f 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java +++ b/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java @@ -9,7 +9,9 @@ package org.elasticsearch.common.util; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.ListenableActionFuture; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; import 
org.elasticsearch.tasks.TaskCancelledException; @@ -41,8 +43,14 @@ */ public abstract class CancellableSingleObjectCache { + private final ThreadContext threadContext; + private final AtomicReference currentCachedItemRef = new AtomicReference<>(); + protected CancellableSingleObjectCache(ThreadContext threadContext) { + this.threadContext = threadContext; + } + /** * Compute a new value for the cache. *
@@ -220,7 +228,7 @@ boolean addListener(ActionListener listener, BooleanSupplier isCancelled) ActionListener.completeWith(listener, () -> future.actionGet(0L)); } else { // Refresh is still pending; it's not cancelled because there are still references. - future.addListener(listener); + future.addListener(ContextPreservingActionListener.wrapPreservingContext(listener, threadContext)); final AtomicBoolean released = new AtomicBoolean(); cancellationChecks.add(() -> { if (released.get() == false && isCancelled.getAsBoolean() && released.compareAndSet(false, true)) { diff --git a/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java b/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java index 65e7415d55624..b0ff0b7233b27 100644 --- a/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java @@ -12,7 +12,9 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.StepListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -193,7 +195,8 @@ public void testExceptionCompletesListenersButIsNotCached() { public void testConcurrentRefreshesAndCancellation() throws InterruptedException { final ThreadPool threadPool = new TestThreadPool("test"); try { - final CancellableSingleObjectCache testCache = new CancellableSingleObjectCache<>() { + final ThreadContext threadContext = threadPool.getThreadContext(); + final CancellableSingleObjectCache testCache = new CancellableSingleObjectCache<>(threadContext) { @Override protected void refresh( String s, @@ -219,6 +222,7 @@ protected String getKey(String s) { final CountDownLatch startLatch = new CountDownLatch(1); final CountDownLatch finishLatch = new CountDownLatch(count); final BlockingQueue queue = ConcurrentCollections.newBlockingQueue(); + final String contextHeader = "test-context-header"; for (int i = 0; i < count; i++) { final boolean cancel = randomBoolean(); @@ -233,11 +237,14 @@ protected String getKey(String s) { final StepListener stepListener = new StepListener<>(); final AtomicBoolean isComplete = new AtomicBoolean(); final AtomicBoolean isCancelled = new AtomicBoolean(); - testCache.get( - input, - isCancelled::get, - ActionListener.runBefore(stepListener, () -> assertTrue(isComplete.compareAndSet(false, true))) - ); + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + final String contextValue = randomAlphaOfLength(10); + threadContext.putHeader(contextHeader, contextValue); + testCache.get(input, isCancelled::get, ActionListener.runBefore(stepListener, () -> { + assertTrue(isComplete.compareAndSet(false, true)); + assertThat(threadContext.getHeader(contextHeader), equalTo(contextValue)); + })); + } final Runnable next = queue.poll(); if (next != null) { @@ -277,7 +284,9 @@ protected String getKey(String s) { public void testConcurrentRefreshesWithFreshnessCheck() throws InterruptedException { final ThreadPool threadPool = new TestThreadPool("test"); try { - final CancellableSingleObjectCache testCache = new CancellableSingleObjectCache<>() { + 
final CancellableSingleObjectCache testCache = new CancellableSingleObjectCache<>( + threadPool.getThreadContext() + ) { @Override protected void refresh( String s, @@ -380,7 +389,7 @@ public void run() { } }; - final CancellableSingleObjectCache testCache = new CancellableSingleObjectCache<>() { + final CancellableSingleObjectCache testCache = new CancellableSingleObjectCache<>(testThreadContext) { @Override protected void refresh( String s, @@ -424,10 +433,16 @@ protected String getKey(String s) { expectThrows(TaskCancelledException.class, () -> cancelledFuture.actionGet(0L)); } + private static final ThreadContext testThreadContext = new ThreadContext(Settings.EMPTY); + private static class TestCache extends CancellableSingleObjectCache { private final LinkedList>> pendingRefreshes = new LinkedList<>(); + private TestCache() { + super(testThreadContext); + } + @Override protected void refresh( String input, From 444f716ede41698e3c2682f7ad799c4f995cbca0 Mon Sep 17 00:00:00 2001 From: Kxrr Date: Thu, 27 Jan 2022 01:01:19 +0800 Subject: [PATCH 049/100] Fix typo in TESTING.asciidoc (#83120) `-Dtest.class` should be `-Dtests.class ` in TESTING.asciidoc --- TESTING.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 8aab31a0e599e..f1a6af9280a96 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -556,7 +556,7 @@ version 5.3.2 run: ./gradlew v5.3.2#bwcTest ------------------------------------------------- -Use -Dtest.class and -Dtests.method to run a specific bwcTest test. +Use -Dtests.class and -Dtests.method to run a specific bwcTest test. For example to run a specific tests from the x-pack rolling upgrade from 7.7.0: ------------------------------------------------- ./gradlew :x-pack:qa:rolling-upgrade:v7.7.0#bwcTest \ From 47d45d0a14347202cc41ad744164b90d171c3340 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Wed, 26 Jan 2022 12:21:46 -0500 Subject: [PATCH 050/100] [DOCS] Archived settings block config updates (#82069) **Changes:** * Notes that archived cluster settings block cluster setting updates. Previously, the docs stated that ES ignored archived cluster settings. * Notes that archived index settings can block index settings updates. For example, it blocks `index.hidden` but not `number_of_replicas`. Previously, the docs stated that you could safely ignore archived index settings. Relates #78351 Closes #61175 --- .../upgrade/archived-settings.asciidoc | 41 +++++++++++-------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/docs/reference/upgrade/archived-settings.asciidoc b/docs/reference/upgrade/archived-settings.asciidoc index 9160df30d2e8c..6d3e15c46fe58 100644 --- a/docs/reference/upgrade/archived-settings.asciidoc +++ b/docs/reference/upgrade/archived-settings.asciidoc @@ -1,28 +1,32 @@ [[archived-settings]] == Archived settings -{es} typically removes support for deprecated settings at major version -releases. If you upgrade a cluster with a deprecated persistent cluster setting -to a version that no longer supports the setting, {es} automatically archives -that setting. Similarly, if you upgrade a cluster that contains an index with an +If you upgrade a cluster with a deprecated persistent cluster setting to a +version that no longer supports the setting, {es} automatically archives that +setting. Similarly, if you upgrade a cluster that contains an index with an unsupported index setting, {es} archives the index setting. 
-Archived settings start with the `archived.` prefix and are ignored by {es}. +We recommend you remove any archived settings after upgrading. Archived +settings are considered invalid and can interfere with your ability to configure +other settings. + +Archived settings start with the `archived.` prefix. [discrete] [[archived-cluster-settings]] === Archived cluster settings -After an upgrade, you can view archived cluster settings using the -<>. +Use the following <> request to +check for archived cluster settings. If the request returns an empty object +(`{ }`), there are no archived cluster settings. [source,console] ---- GET _cluster/settings?flat_settings=true&filter_path=persistent.archived* ---- -You can remove archived cluster settings using the -<>. +To remove any archived cluster settings, use the following +<> request. [source,console] ---- @@ -48,18 +52,23 @@ templates during an upgrade. Attempts to use a template that contains an unsupported index setting will fail and return an error. This includes automated operations, such as the {ilm-init} rollover action. -You can view archived settings for an index using the <>. +Archived index settings don't affect an index's configuration or most index +operations, such as indexing or search. However, you'll need to remove them +before you can configure other settings for the index, such as `index.hidden`. + +Use the following <> request to get a +list of indices with archived settings. If the request returns an empty object +(`{ }`), there are no archived index settings. [source,console] ---- -GET my-index/_settings?flat_settings=true&filter_path=**.settings.archived* +GET */_settings?flat_settings=true&filter_path=**.settings.archived* ---- // TEST[s/^/PUT my-index\n/] -Removing archived index settings requires a reindex after the upgrade. However, -reindexing can be resource intensive. Because {es} ignores archived settings, -you can safely leave them in place if wanted. +Removing an index's archived index settings requires a <>. +Reindexing can be resource-intensive and time-consuming. Before you start, test +the reindex with a subset of the data to estimate your time requirements.
[source,console] ---- @@ -69,7 +78,7 @@ POST _reindex "index": "my-index" }, "dest": { - "index": "reindexed-v8-my-index" + "index": "reindexed-my-index" } } ---- From 563cb9e37be0d77e1568e7c0ba6913363fed0c51 Mon Sep 17 00:00:00 2001 From: Kevin Lacabane Date: Wed, 26 Jan 2022 18:53:24 +0100 Subject: [PATCH 051/100] fix monitoring-beats-mb template (#83160) --- .../main/resources/monitoring-beats-mb.json | 728 +++++++++--------- 1 file changed, 364 insertions(+), 364 deletions(-) diff --git a/x-pack/plugin/core/src/main/resources/monitoring-beats-mb.json b/x-pack/plugin/core/src/main/resources/monitoring-beats-mb.json index 787f78304ea0e..6c99d313f267c 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-beats-mb.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-beats-mb.json @@ -1042,476 +1042,476 @@ }, "beats_stats": { "properties": { - "apm-server": { + "beat": { "properties": { - "acm": { + "host": { + "type": "alias", + "path": "beat.stats.beat.host" + }, + "name": { + "type": "alias", + "path": "beat.stats.beat.name" + }, + "type": { + "type": "alias", + "path": "beat.stats.beat.type" + }, + "uuid": { + "type": "alias", + "path": "beat.stats.beat.uuid" + }, + "version": { + "type": "alias", + "path": "beat.stats.beat.version" + } + } + }, + "metrics": { + "properties": { + "apm-server": { "properties": { - "request": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.acm.request.count" - } - } - }, - "response": { + "acm": { "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.count" - }, - "errors": { - "properties": { - "closed": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.closed" - }, - "count": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.count" - }, - "decode": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.decode" - }, - "forbidden": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.forbidden" - }, - "internal": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.internal" - }, - "invalidquery": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.invalidquery" - }, - "method": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.method" - }, - "notfound": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.notfound" - }, - "queue": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.queue" - }, - "ratelimit": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.ratelimit" - }, - "toolarge": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.toolarge" - }, - "unauthorized": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.unauthorized" - }, - "unavailable": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.unavailable" - }, - "validate": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.errors.validate" - } - } - }, "request": { "properties": { "count": { "type": "alias", - "path": "beat.stats.apm_server.acm.response.request.count" + "path": "beat.stats.apm_server.acm.request.count" } } }, - "unset": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.unset" - }, - "valid": { + "response": { "properties": { - "accepted": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.valid.accepted" - }, "count": { "type": "alias", - "path": 
"beat.stats.apm_server.acm.response.valid.count" + "path": "beat.stats.apm_server.acm.response.count" }, - "notmodified": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.valid.notmodified" + "errors": { + "properties": { + "closed": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.closed" + }, + "count": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.count" + }, + "decode": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.decode" + }, + "forbidden": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.forbidden" + }, + "internal": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.internal" + }, + "invalidquery": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.invalidquery" + }, + "method": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.method" + }, + "notfound": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.notfound" + }, + "queue": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.queue" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.ratelimit" + }, + "toolarge": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.toolarge" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.unauthorized" + }, + "unavailable": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.unavailable" + }, + "validate": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.validate" + } + } }, - "ok": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.request.count" + } + } + }, + "unset": { "type": "alias", - "path": "beat.stats.apm_server.acm.response.valid.ok" + "path": "beat.stats.apm_server.acm.response.unset" + }, + "valid": { + "properties": { + "accepted": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.valid.accepted" + }, + "count": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.valid.count" + }, + "notmodified": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.valid.notmodified" + }, + "ok": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.valid.ok" + } + } } } } } - } - } - }, - "decoder": { - "properties": { - "deflate": { - "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.deflate.content-length" - }, - "count": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.deflate.count" - } - } }, - "gzip": { - "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.gzip.content-length" - }, - "count": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.gzip.count" - } - } - }, - "missing-content-length": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.missing-content-length.count" - } - } - }, - "reader": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.reader.count" - }, - "size": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.reader.size" - } - } - }, - "uncompressed": { - "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.uncompressed.content-length" - }, - "count": { - "type": "alias", - "path": 
"beat.stats.apm_server.decoder.uncompressed.count" - } - } - } - } - }, - "processor": { - "properties": { - "error": { + "decoder": { "properties": { - "decoding": { + "deflate": { "properties": { - "count": { + "content-length": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.decoding.count" + "path": "beat.stats.apm_server.decoder.deflate.content-length" }, - "errors": { + "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.decoding.errors" + "path": "beat.stats.apm_server.decoder.deflate.count" } } }, - "frames": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.frames" - }, - "spans": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.spans" - }, - "stacktraces": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.stacktraces" - }, - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.transformations" - }, - "validation": { + "gzip": { "properties": { - "count": { + "content-length": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.validation.count" + "path": "beat.stats.apm_server.decoder.gzip.content-length" }, - "errors": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.validation.errors" - } - } - } - } - }, - "metric": { - "properties": { - "decoding": { - "properties": { "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.decoding.count" - }, - "errors": { - "type": "alias", - "path": "beat.stats.apm_server.processor.metric.decoding.errors" + "path": "beat.stats.apm_server.decoder.gzip.count" } } }, - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.metric.transformations" - }, - "validation": { + "missing-content-length": { "properties": { "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.validation.count" - }, - "errors": { - "type": "alias", - "path": "beat.stats.apm_server.processor.metric.validation.errors" + "path": "beat.stats.apm_server.decoder.missing-content-length.count" } } - } - } - }, - "sourcemap": { - "properties": { - "counter": { - "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.counter" }, - "decoding": { + "reader": { "properties": { "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.decoding.count" + "path": "beat.stats.apm_server.decoder.reader.count" }, - "errors": { + "size": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.decoding.errors" + "path": "beat.stats.apm_server.decoder.reader.size" } } }, - "validation": { + "uncompressed": { "properties": { - "count": { + "content-length": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.validation.count" + "path": "beat.stats.apm_server.decoder.uncompressed.content-length" }, - "errors": { + "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.validation.errors" + "path": "beat.stats.apm_server.decoder.uncompressed.count" } } } } }, - "span": { - "properties": { - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.span.transformations" - } - } - }, - "transaction": { + "processor": { "properties": { - "decoding": { + "error": { "properties": { - "count": { + "decoding": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.processor.error.decoding.count" + }, + "errors": { + "type": "alias", + "path": "beat.stats.apm_server.processor.error.decoding.errors" + } 
+ } + }, + "frames": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.decoding.count" + "path": "beat.stats.apm_server.processor.error.frames" }, - "errors": { + "spans": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.decoding.errors" - } - } - }, - "frames": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.frames" - }, - "spans": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.spans" - }, - "stacktraces": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.stacktraces" - }, - "transactions": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.transactions" - }, - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.transformations" - }, - "validation": { - "properties": { - "count": { + "path": "beat.stats.apm_server.processor.error.spans" + }, + "stacktraces": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.validation.count" + "path": "beat.stats.apm_server.processor.error.stacktraces" }, - "errors": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.validation.errors" + "path": "beat.stats.apm_server.processor.error.transformations" + }, + "validation": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.processor.error.validation.count" + }, + "errors": { + "type": "alias", + "path": "beat.stats.apm_server.processor.error.validation.errors" + } + } } } - } - } - } - } - }, - "server": { - "properties": { - "concurrent": { - "properties": { - "wait": { + }, + "metric": { "properties": { - "ms": { + "decoding": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.processor.metric.decoding.count" + }, + "errors": { + "type": "alias", + "path": "beat.stats.apm_server.processor.metric.decoding.errors" + } + } + }, + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.server.concurrent.wait.ms" + "path": "beat.stats.apm_server.processor.metric.transformations" + }, + "validation": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.processor.metric.validation.count" + }, + "errors": { + "type": "alias", + "path": "beat.stats.apm_server.processor.metric.validation.errors" + } + } } } - } - } - }, - "request": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.server.request.count" - } - } - }, - "response": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.server.response.count" }, - "errors": { + "sourcemap": { "properties": { - "closed": { - "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.closed" - }, - "concurrency": { - "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.concurrency" - }, - "count": { - "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.count" - }, - "decode": { + "counter": { "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.decode" + "path": "beat.stats.apm_server.processor.sourcemap.counter" }, - "forbidden": { - "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.forbidden" + "decoding": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.processor.sourcemap.decoding.count" + }, + "errors": { + "type": "alias", + "path": "beat.stats.apm_server.processor.sourcemap.decoding.errors" 
+ } + } }, - "internal": { + "validation": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.processor.sourcemap.validation.count" + }, + "errors": { + "type": "alias", + "path": "beat.stats.apm_server.processor.sourcemap.validation.errors" + } + } + } + } + }, + "span": { + "properties": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.internal" + "path": "beat.stats.apm_server.processor.span.transformations" + } + } + }, + "transaction": { + "properties": { + "decoding": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.processor.transaction.decoding.count" + }, + "errors": { + "type": "alias", + "path": "beat.stats.apm_server.processor.transaction.decoding.errors" + } + } }, - "method": { + "frames": { "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.method" + "path": "beat.stats.apm_server.processor.transaction.frames" }, - "queue": { + "spans": { "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.queue" + "path": "beat.stats.apm_server.processor.transaction.spans" }, - "ratelimit": { + "stacktraces": { "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.ratelimit" + "path": "beat.stats.apm_server.processor.transaction.stacktraces" }, - "toolarge": { + "transactions": { "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.toolarge" + "path": "beat.stats.apm_server.processor.transaction.transactions" }, - "unauthorized": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.unauthorized" + "path": "beat.stats.apm_server.processor.transaction.transformations" }, - "validate": { - "type": "alias", - "path": "beat.stats.apm_server.server.response.errors.validate" + "validation": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.processor.transaction.validation.count" + }, + "errors": { + "type": "alias", + "path": "beat.stats.apm_server.processor.transaction.validation.errors" + } + } + } + } + } + } + }, + "server": { + "properties": { + "concurrent": { + "properties": { + "wait": { + "properties": { + "ms": { + "type": "alias", + "path": "beat.stats.apm_server.server.concurrent.wait.ms" + } + } } } }, - "valid": { + "request": { "properties": { - "accepted": { + "count": { "type": "alias", - "path": "beat.stats.apm_server.server.response.valid.accepted" - }, + "path": "beat.stats.apm_server.server.request.count" + } + } + }, + "response": { + "properties": { "count": { "type": "alias", - "path": "beat.stats.apm_server.server.response.valid.count" + "path": "beat.stats.apm_server.server.response.count" }, - "ok": { - "type": "alias", - "path": "beat.stats.apm_server.server.response.valid.ok" + "errors": { + "properties": { + "closed": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.closed" + }, + "concurrency": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.concurrency" + }, + "count": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.count" + }, + "decode": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.decode" + }, + "forbidden": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.forbidden" + }, + "internal": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.internal" + }, + "method": { + "type": "alias", + "path": 
"beat.stats.apm_server.server.response.errors.method" + }, + "queue": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.queue" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.ratelimit" + }, + "toolarge": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.toolarge" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.unauthorized" + }, + "validate": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.validate" + } + } + }, + "valid": { + "properties": { + "accepted": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.valid.accepted" + }, + "count": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.valid.count" + }, + "ok": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.valid.ok" + } + } } } } } } } - } - } - }, - "beat": { - "properties": { - "host": { - "type": "alias", - "path": "beat.stats.beat.host" - }, - "name": { - "type": "alias", - "path": "beat.stats.beat.name" - }, - "type": { - "type": "alias", - "path": "beat.stats.beat.type" - }, - "uuid": { - "type": "alias", - "path": "beat.stats.beat.uuid" }, - "version": { - "type": "alias", - "path": "beat.stats.beat.version" - } - } - }, - "metrics": { - "properties": { "beat": { "properties": { "cgroup": { From 305ff20b8f7bc9608501dcf5c6f271da4df25e3a Mon Sep 17 00:00:00 2001 From: Ugo Sangiorgi Date: Wed, 26 Jan 2022 14:00:40 -0500 Subject: [PATCH 052/100] [DOCS] Add missing HTML anchors to CCR and ML (#80287) --- docs/reference/ccr/apis/follow/put-follow.asciidoc | 4 ++-- .../ml/anomaly-detection/apis/put-job.asciidoc | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index 3b0ec303a23a5..d09eb51534042 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -69,10 +69,10 @@ referenced leader index. When this API returns, the follower index exists, and [[ccr-put-follow-request-body]] ==== {api-request-body-title} -`leader_index`:: +[[ccr-put-follow-request-body-leader_index]]`leader_index`:: (Required, string) The name of the index in the leader cluster to follow. -`remote_cluster`:: +[[ccr-put-follow-request-body-remote_cluster]]`remote_cluster`:: (Required, string) The <> containing the leader index. 
diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index a0fca83f0b244..bcf6dd8040146 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -249,7 +249,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=background-persist-interval] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=custom-settings] -`daily_model_snapshot_retention_after_days`:: +[[put-dailymodelsnapshotretentionafterdays]]`daily_model_snapshot_retention_after_days`:: (Optional, long) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=daily-model-snapshot-retention-after-days] @@ -259,7 +259,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=daily-model-snapshot-retention- include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=data-description] //End data_description -`datafeed_config`:: +[[put-datafeedconfig]]`datafeed_config`:: (Optional, object) The {ml-docs}/ml-dfeeds.html[{dfeed}], which retrieves data from {es} for analysis by the job. You can associate only one {dfeed} with each {anomaly-job}. @@ -375,19 +375,19 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-plot-config-terms] ==== //End model_plot_config -`model_snapshot_retention_days`:: +[[put-modelsnapshotretentiondays]]`model_snapshot_retention_days`:: (Optional, long) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-snapshot-retention-days] -`renormalization_window_days`:: +[[put-renormalizationwindowdays]]`renormalization_window_days`:: (Optional, long) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=renormalization-window-days] -`results_index_name`:: +[[put-resultsindexname]]`results_index_name`:: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=results-index-name] -`results_retention_days`:: +[[put-resultsretentiondays]]`results_retention_days`:: (Optional, long) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=results-retention-days] From 5b1f8d4b37a7d7e800ff87a1bd1188916262b7da Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 26 Jan 2022 20:11:24 +0100 Subject: [PATCH 053/100] Make AllocationDeciders.canRemain more Efficient (#83171) The iteration before this change was over the value collection of a linked hash map, wrapped in unmodifiable. Given that this runs approximately #deciders * #shards iterations during a reroute it makes sense to optimize here. In practice the iteration did not inline well and showed up as about 25% of the overall cost of this method. 
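The shape of the change, as a hypothetical minimal sketch rather than the real classes: copy the decider collection into a plain array once at construction, so the per-shard hot loop iterates a bare array instead of an unmodifiable wrapper around a map's value collection.

[source,java]
----
import java.util.Collection;

class AllocationDecidersSketch {
    interface Decider {
        boolean canRemain(String shardId, String nodeId);
    }

    private final Decider[] deciders;

    AllocationDecidersSketch(Collection<Decider> deciders) {
        // one-time copy at construction; the wrapped collection is never iterated again
        this.deciders = deciders.toArray(Decider[]::new);
    }

    // Runs roughly #deciders * #shards times per reroute, so the iteration
    // should stay a simple counted loop over a plain array.
    boolean canRemain(String shardId, String nodeId) {
        for (Decider decider : deciders) {
            if (decider.canRemain(shardId, nodeId) == false) {
                return false;
            }
        }
        return true;
    }
}
----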
--- .../routing/allocation/decider/AllocationDeciders.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java index f6f43bd5c47c6..83d68724d0d0c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import java.util.Collection; -import java.util.Collections; /** * A composite {@link AllocationDecider} combining the "decision" of multiple @@ -27,10 +26,10 @@ public class AllocationDeciders extends AllocationDecider { private static final Logger logger = LogManager.getLogger(AllocationDeciders.class); - private final Collection allocations; + private final AllocationDecider[] allocations; public AllocationDeciders(Collection allocations) { - this.allocations = Collections.unmodifiableCollection(allocations); + this.allocations = allocations.toArray(AllocationDecider[]::new); } @Override @@ -265,7 +264,7 @@ public Decision canAllocateReplicaWhenThereIsRetentionLease(ShardRouting shardRo return ret; } - private void addDecision(Decision.Multi ret, Decision decision, RoutingAllocation allocation) { + private static void addDecision(Decision.Multi ret, Decision decision, RoutingAllocation allocation) { // We never add ALWAYS decisions and only add YES decisions when requested by debug mode (since Multi default is YES). if (decision != Decision.ALWAYS && (allocation.getDebugMode() == RoutingAllocation.DebugMode.ON || decision.type() != Decision.Type.YES)) { From b65fb17a48328dc91c66facda3f6b41e6f5a8efb Mon Sep 17 00:00:00 2001 From: Mitar Date: Wed, 26 Jan 2022 11:07:50 -0800 Subject: [PATCH 054/100] Fixed documentation for built in date formats. (#83036) We had a lot of `ZZ` on the end of formats. But it's just `Z`. --- docs/reference/mapping/params/format.asciidoc | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index 9d3468e38a5a1..67780bc3fdcab 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -180,12 +180,12 @@ The following tables lists all the defaults ISO formats supported: `date_time` or `strict_date_time`:: A formatter that combines a full date and time, separated by a 'T': - `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`. + `yyyy-MM-dd'T'HH:mm:ss.SSSZ`. `date_time_no_millis` or `strict_date_time_no_millis`:: A formatter that combines a full date and time without millis, separated - by a 'T': `yyyy-MM-dd'T'HH:mm:ssZZ`. + by a 'T': `yyyy-MM-dd'T'HH:mm:ssZ`. `hour` or `strict_hour`:: @@ -219,34 +219,34 @@ The following tables lists all the defaults ISO formats supported: `ordinal_date_time` or `strict_ordinal_date_time`:: A formatter for a full ordinal date and time, using a four digit year and - three digit dayOfYear: `yyyy-DDD'T'HH:mm:ss.SSSZZ`. + three digit dayOfYear: `yyyy-DDD'T'HH:mm:ss.SSSZ`. `ordinal_date_time_no_millis` or `strict_ordinal_date_time_no_millis`:: A formatter for a full ordinal date and time without millis, using a four - digit year and three digit dayOfYear: `yyyy-DDD'T'HH:mm:ssZZ`. 
+ digit year and three digit dayOfYear: `yyyy-DDD'T'HH:mm:ssZ`. `time` or `strict_time`:: A formatter for a two digit hour of day, two digit minute of hour, two digit second of minute, three digit fraction of second, and time zone - offset: `HH:mm:ss.SSSZZ`. + offset: `HH:mm:ss.SSSZ`. `time_no_millis` or `strict_time_no_millis`:: A formatter for a two digit hour of day, two digit minute of hour, two - digit second of minute, and time zone offset: `HH:mm:ssZZ`. + digit second of minute, and time zone offset: `HH:mm:ssZ`. `t_time` or `strict_t_time`:: A formatter for a two digit hour of day, two digit minute of hour, two digit second of minute, three digit fraction of second, and time zone - offset prefixed by 'T': `'T'HH:mm:ss.SSSZZ`. + offset prefixed by 'T': `'T'HH:mm:ss.SSSZ`. `t_time_no_millis` or `strict_t_time_no_millis`:: A formatter for a two digit hour of day, two digit minute of hour, two - digit second of minute, and time zone offset prefixed by 'T': `'T'HH:mm:ssZZ`. + digit second of minute, and time zone offset prefixed by 'T': `'T'HH:mm:ssZ`. `week_date` or `strict_week_date`:: @@ -256,12 +256,12 @@ The following tables lists all the defaults ISO formats supported: `week_date_time` or `strict_week_date_time`:: A formatter that combines a full weekyear date and time, separated by a - 'T': `xxxx-'W'ww-e'T'HH:mm:ss.SSSZZ`. + 'T': `xxxx-'W'ww-e'T'HH:mm:ss.SSSZ`. `week_date_time_no_millis` or `strict_week_date_time_no_millis`:: A formatter that combines a full weekyear date and time without millis, - separated by a 'T': `xxxx-'W'ww-e'T'HH:mm:ssZZ`. + separated by a 'T': `xxxx-'W'ww-e'T'HH:mm:ssZ`. `weekyear` or `strict_weekyear`:: From 39b3cb8fe166976f6211d6ffea4d902b14642f28 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 26 Jan 2022 19:22:31 +0000 Subject: [PATCH 055/100] Correct context for ClusterConnManager listener (#83035) Today `ClusterConnectionManager#connectToNode` completes its listeners in the thread context in which the connection completes, which may not be the correct context if there are multiple concurrent connection attempts. With this commit we make sure to complete each listener in the context in which it was passed to the corresponding call to `connectToNode`. 
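The fixes in this series share a regression-test pattern, condensed below from the test diffs that follow. It relies on the real `ThreadContext` and `ESTestCase` helpers, so it is an excerpt rather than a standalone program: stash a fresh context, plant a marker header, and assert that the listener still observes it on completion.

[source,java]
----
try (ThreadContext.StoredContext ignored = threadContext.stashContext()) {
    final String contextValue = randomAlphaOfLength(10);
    threadContext.putHeader("test-context-header", contextValue);
    connectionManager.connectToNode(node, connectionProfile, validator, ActionListener.wrap(connection -> {
        // completion may happen on a thread driven by a different caller, but the
        // wrapped listener must still observe this caller's header
        assertThat(threadContext.getHeader("test-context-header"), equalTo(contextValue));
    }, e -> fail(e.toString())));
}
----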
Co-authored-by: ievgen.degtiarenko --- docs/changelog/83035.yaml | 5 ++ .../transport/ClusterConnectionManager.java | 18 +++-- .../transport/RemoteClusterConnection.java | 2 +- .../transport/TransportService.java | 2 +- .../action/main/MainActionTests.java | 3 +- .../search/MultiSearchActionTookTests.java | 2 +- .../TransportWriteActionTests.java | 2 +- .../cluster/coordination/JoinHelperTests.java | 5 +- .../discovery/PeerFinderTests.java | 11 +++- .../ClusterConnectionManagerTests.java | 55 +++++++++------- .../ProxyConnectionStrategyTests.java | 42 ++++++++++-- .../RemoteConnectionManagerTests.java | 6 +- .../RemoteConnectionStrategyTests.java | 21 +++++- .../SniffConnectionStrategyTests.java | 66 +++++++++++++++---- .../test/transport/MockTransport.java | 4 +- .../test/transport/MockTransportService.java | 2 +- ...rtSamlInitiateSingleSignOnActionTests.java | 2 +- ...ansportOpenIdConnectLogoutActionTests.java | 2 +- .../role/TransportDeleteRoleActionTests.java | 7 +- .../role/TransportGetRolesActionTests.java | 9 +-- .../role/TransportPutRoleActionTests.java | 11 ++-- .../TransportGetRoleMappingsActionTests.java | 3 +- .../TransportPutRoleMappingActionTests.java | 3 +- ...sportSamlInvalidateSessionActionTests.java | 2 +- .../saml/TransportSamlLogoutActionTests.java | 2 +- .../TransportAuthenticateActionTests.java | 7 +- .../TransportChangePasswordActionTests.java | 11 ++-- .../user/TransportDeleteUserActionTests.java | 11 ++-- .../user/TransportGetUsersActionTests.java | 12 ++-- .../user/TransportPutUserActionTests.java | 12 ++-- .../user/TransportSetEnabledActionTests.java | 10 +-- 31 files changed, 243 insertions(+), 107 deletions(-) create mode 100644 docs/changelog/83035.yaml diff --git a/docs/changelog/83035.yaml b/docs/changelog/83035.yaml new file mode 100644 index 0000000000000..cfec033421bf9 --- /dev/null +++ b/docs/changelog/83035.yaml @@ -0,0 +1,5 @@ +pr: 83035 +summary: Correct context for `ClusterConnManager` listener +area: Network +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/transport/ClusterConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ClusterConnectionManager.java index dd8c86b67c7fe..95a1dddd94da6 100644 --- a/server/src/main/java/org/elasticsearch/transport/ClusterConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ClusterConnectionManager.java @@ -10,11 +10,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.concurrent.RunOnce; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -44,18 +46,20 @@ public class ClusterConnectionManager implements ConnectionManager { private final AbstractRefCounted connectingRefCounter = AbstractRefCounted.of(this::pendingConnectionsComplete); private final Transport transport; + private final ThreadContext threadContext; private final ConnectionProfile defaultProfile; private final AtomicBoolean closing = new AtomicBoolean(false); private final CountDownLatch closeLatch = new 
CountDownLatch(1); private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener(); - public ClusterConnectionManager(Settings settings, Transport transport) { - this(ConnectionProfile.buildDefaultConnectionProfile(settings), transport); + public ClusterConnectionManager(Settings settings, Transport transport, ThreadContext threadContext) { + this(ConnectionProfile.buildDefaultConnectionProfile(settings), transport, threadContext); } - public ClusterConnectionManager(ConnectionProfile connectionProfile, Transport transport) { + public ClusterConnectionManager(ConnectionProfile connectionProfile, Transport transport, ThreadContext threadContext) { this.transport = transport; this.defaultProfile = connectionProfile; + this.threadContext = threadContext; } @Override @@ -91,7 +95,13 @@ public void connectToNode( ConnectionValidator connectionValidator, ActionListener listener ) throws ConnectTransportException { - connectToNodeOrRetry(node, connectionProfile, connectionValidator, 0, listener); + connectToNodeOrRetry( + node, + connectionProfile, + connectionValidator, + 0, + ContextPreservingActionListener.wrapPreservingContext(listener, threadContext) + ); } /** diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 3421545bb6451..ea2474d7c74e4 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -197,7 +197,7 @@ int getNumNodesConnected() { } private static ConnectionManager createConnectionManager(ConnectionProfile connectionProfile, TransportService transportService) { - return new ClusterConnectionManager(connectionProfile, transportService.transport); + return new ClusterConnectionManager(connectionProfile, transportService.transport, transportService.threadPool.getThreadContext()); } ConnectionManager getConnectionManager() { diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 9ad29fee55ae4..73065a0d8a9ea 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -186,7 +186,7 @@ public TransportService( localNodeFactory, clusterSettings, taskHeaders, - new ClusterConnectionManager(settings, transport) + new ClusterConnectionManager(settings, transport, threadPool.getThreadContext()) ); } diff --git a/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java b/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java index 5db73fd0f8ee9..e23c358b9de26 100644 --- a/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; @@ -77,7 +78,7 @@ public void testMainActionClusterAvailable() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git 
a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java index 495b441c65884..01354a3702fbc 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java @@ -122,7 +122,7 @@ private TransportMultiSearchAction createTransportMultiSearchAction(boolean cont TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 43e4464ddb910..24d95025c09d6 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -387,7 +387,7 @@ protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentF new TransportService( Settings.EMPTY, mock(Transport.class), - null, + TransportWriteActionTests.threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java index ca30d4bfb4c3b..04e67e175f31b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java @@ -66,15 +66,16 @@ public void testJoinDeduplication() { DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(); CapturingTransport capturingTransport = new HandshakingCapturingTransport(); DiscoveryNode localNode = new DiscoveryNode("node0", buildNewFakeTransportAddress(), Version.CURRENT); + final ThreadPool threadPool = deterministicTaskQueue.getThreadPool(); TransportService transportService = new TransportService( Settings.EMPTY, capturingTransport, - deterministicTaskQueue.getThreadPool(), + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, null, Collections.emptySet(), - new ClusterConnectionManager(Settings.EMPTY, capturingTransport) + new ClusterConnectionManager(Settings.EMPTY, capturingTransport, threadPool.getThreadContext()) ); JoinHelper joinHelper = new JoinHelper( Settings.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java index 2dc24cf73d3b5..7369e3e25b06d 100644 --- a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.test.transport.CapturingTransport.CapturedRequest; import org.elasticsearch.test.transport.StubbableConnectionManager; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ClusterConnectionManager; import org.elasticsearch.transport.ConnectionManager; import org.elasticsearch.transport.TransportException; @@ -210,7 
+211,13 @@ public void setup() { localNode = newDiscoveryNode("local-node"); - ConnectionManager innerConnectionManager = new ClusterConnectionManager(settings, capturingTransport); + final ThreadPool threadPool = deterministicTaskQueue.getThreadPool(); + + final ConnectionManager innerConnectionManager = new ClusterConnectionManager( + settings, + capturingTransport, + threadPool.getThreadContext() + ); StubbableConnectionManager connectionManager = new StubbableConnectionManager(innerConnectionManager); connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> { final boolean isConnected = connectedNodes.contains(discoveryNode); @@ -222,7 +229,7 @@ public void setup() { transportService = new TransportService( settings, capturingTransport, - deterministicTaskQueue.getThreadPool(), + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, diff --git a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java index a55a00f0135e7..89fe5509c2083 100644 --- a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; @@ -45,6 +46,7 @@ import java.util.function.Supplier; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -63,7 +65,7 @@ public void createConnectionManager() { Settings settings = Settings.builder().put("node.name", ClusterConnectionManagerTests.class.getSimpleName()).build(); threadPool = new ThreadPool(settings); transport = mock(Transport.class); - connectionManager = new ClusterConnectionManager(settings, transport); + connectionManager = new ClusterConnectionManager(settings, transport, threadPool.getThreadContext()); TimeValue oneSecond = new TimeValue(1000); TimeValue oneMinute = TimeValue.timeValueMinutes(1); connectionProfile = ConnectionProfile.buildSingleChannelProfile( @@ -254,6 +256,9 @@ public void testConcurrentConnects() throws Exception { int threadCount = between(1, 10); Releasable[] releasables = new Releasable[threadCount]; + final ThreadContext threadContext = threadPool.getThreadContext(); + final String contextHeader = "test-context-header"; + CyclicBarrier barrier = new CyclicBarrier(threadCount + 1); Semaphore pendingCloses = new Semaphore(threadCount); for (int i = 0; i < threadCount; i++) { @@ -265,27 +270,33 @@ public void testConcurrentConnects() throws Exception { throw new RuntimeException(e); } CountDownLatch latch = new CountDownLatch(1); - connectionManager.connectToNode(node, connectionProfile, validator, ActionListener.wrap(c -> { - assert connectionManager.nodeConnected(node); - - assertTrue(pendingCloses.tryAcquire()); - connectionManager.getConnection(node).addRemovedListener(ActionListener.wrap(pendingCloses::release)); - - if (randomBoolean()) { - releasables[threadIndex] = c; - 
nodeConnectedCount.incrementAndGet(); - } else { - Releasables.close(c); - nodeClosedCount.incrementAndGet(); - } - - assert latch.getCount() == 1; - latch.countDown(); - }, e -> { - nodeFailureCount.incrementAndGet(); - assert latch.getCount() == 1; - latch.countDown(); - })); + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + final String contextValue = randomAlphaOfLength(10); + threadContext.putHeader(contextHeader, contextValue); + connectionManager.connectToNode(node, connectionProfile, validator, ActionListener.wrap(c -> { + assert connectionManager.nodeConnected(node); + assertThat(threadContext.getHeader(contextHeader), equalTo(contextValue)); + + assertTrue(pendingCloses.tryAcquire()); + connectionManager.getConnection(node).addRemovedListener(ActionListener.wrap(pendingCloses::release)); + + if (randomBoolean()) { + releasables[threadIndex] = c; + nodeConnectedCount.incrementAndGet(); + } else { + Releasables.close(c); + nodeClosedCount.incrementAndGet(); + } + + assert latch.getCount() == 1; + latch.countDown(); + }, e -> { + assertThat(threadContext.getHeader(contextHeader), equalTo(contextValue)); + nodeFailureCount.incrementAndGet(); + assert latch.getCount() == 1; + latch.countDown(); + })); + } try { latch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java index ec1ef2b5aab09..e77a755a6207f 100644 --- a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java @@ -83,7 +83,11 @@ public void testProxyStrategyWillOpenExpectedNumberOfConnectionsToAddress() { localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); int numOfConnections = randomIntBetween(4, 8); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); @@ -127,7 +131,11 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); int numOfConnections = randomIntBetween(4, 8); AtomicBoolean useAddress1 = new AtomicBoolean(true); @@ -189,7 +197,11 @@ public void testConnectFailsWithIncompatibleNodes() { localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); int numOfConnections = randomIntBetween(4, 8); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); @@ -232,7 +244,11 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro localService.start(); 
localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); int numOfConnections = randomIntBetween(4, 8); AtomicBoolean useAddress1 = new AtomicBoolean(true); @@ -295,7 +311,11 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); int numOfConnections = randomIntBetween(4, 8); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); @@ -330,7 +350,11 @@ public void testProxyStrategyWillNeedToBeRebuiltIfNumOfSocketsOrAddressesOrServe localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); int numOfConnections = randomIntBetween(4, 8); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); @@ -435,7 +459,11 @@ public void testServerNameAttributes() { String address = "localhost:" + address1.getPort(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); int numOfConnections = randomIntBetween(4, 8); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java index b045376848038..031bafbaf78fe 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; import java.net.InetAddress; @@ -35,7 +36,10 @@ public class RemoteConnectionManagerTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); transport = mock(Transport.class); - remoteConnectionManager = new RemoteConnectionManager("remote-cluster", new ClusterConnectionManager(Settings.EMPTY, transport)); + remoteConnectionManager = new RemoteConnectionManager( + "remote-cluster", + new ClusterConnectionManager(Settings.EMPTY, transport, new ThreadContext(Settings.EMPTY)) + ); } @SuppressWarnings("unchecked") diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java 
index a4b0cda7e0a8a..76cfafc412664 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -17,8 +18,14 @@ public class RemoteConnectionStrategyTests extends ESTestCase { + private static final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + public void testStrategyChangeMeansThatStrategyMustBeRebuilt() { - ClusterConnectionManager connectionManager = new ClusterConnectionManager(Settings.EMPTY, mock(Transport.class)); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + Settings.EMPTY, + mock(Transport.class), + threadContext + ); RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", @@ -34,7 +41,11 @@ public void testStrategyChangeMeansThatStrategyMustBeRebuilt() { } public void testSameStrategyChangeMeansThatStrategyDoesNotNeedToBeRebuilt() { - ClusterConnectionManager connectionManager = new ClusterConnectionManager(Settings.EMPTY, mock(Transport.class)); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + Settings.EMPTY, + mock(Transport.class), + threadContext + ); RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", @@ -50,7 +61,11 @@ public void testSameStrategyChangeMeansThatStrategyDoesNotNeedToBeRebuilt() { } public void testChangeInConnectionProfileMeansTheStrategyMustBeRebuilt() { - ClusterConnectionManager connectionManager = new ClusterConnectionManager(TestProfiles.LIGHT_PROFILE, mock(Transport.class)); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + TestProfiles.LIGHT_PROFILE, + mock(Transport.class), + threadContext + ); assertEquals(TimeValue.MINUS_ONE, connectionManager.getConnectionProfile().getPingInterval()); assertEquals(Compression.Enabled.INDEXING_DATA, connectionManager.getConnectionProfile().getCompressionEnabled()); assertEquals(Compression.Scheme.LZ4, connectionManager.getConnectionProfile().getCompressionScheme()); diff --git a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java index 02c1fd9f6603e..926ebfd892812 100644 --- a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java @@ -123,7 +123,11 @@ public void testSniffStrategyWillConnectToAndDiscoverNodes() { localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); SniffConnectionStrategy strategy = new 
SniffConnectionStrategy( @@ -172,7 +176,11 @@ public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Excep localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); SniffConnectionStrategy strategy = new SniffConnectionStrategy( @@ -220,7 +228,11 @@ public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOn localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); SniffConnectionStrategy strategy = new SniffConnectionStrategy( @@ -277,7 +289,11 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); SniffConnectionStrategy strategy = new SniffConnectionStrategy( @@ -316,7 +332,11 @@ public void testConnectFailsWithIncompatibleNodes() { localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); SniffConnectionStrategy strategy = new SniffConnectionStrategy( @@ -358,7 +378,11 @@ public void testFilterNodesWithNodePredicate() { localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); SniffConnectionStrategy strategy = new SniffConnectionStrategy( @@ -405,7 +429,11 @@ public void testConnectFailsIfNoConnectionsOpened() { localService.acceptIncomingRequests(); // Predicate excludes seed node as a possible connection - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, 
connectionManager); SniffConnectionStrategy strategy = new SniffConnectionStrategy( @@ -454,7 +482,11 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); SniffConnectionStrategy strategy = new SniffConnectionStrategy( @@ -522,7 +554,11 @@ public void testMultipleCallsToConnectEnsuresConnection() { localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); SniffConnectionStrategy strategy = new SniffConnectionStrategy( @@ -611,7 +647,11 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { List seedNodes = Collections.singletonList(accessibleNode.toString()); TransportAddress proxyAddress = accessibleNode.getAddress(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); SniffConnectionStrategy strategy = new SniffConnectionStrategy( @@ -659,7 +699,11 @@ public void testSniffStrategyWillNeedToBeRebuiltIfNumOfConnectionsOrSeedsOrProxy localService.start(); localService.acceptIncomingRequests(); - ClusterConnectionManager connectionManager = new ClusterConnectionManager(profile, localService.transport); + final ClusterConnectionManager connectionManager = new ClusterConnectionManager( + profile, + localService.transport, + threadPool.getThreadContext() + ); try ( RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); SniffConnectionStrategy strategy = new SniffConnectionStrategy( diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java index 55d2e66bdcc0d..1a725392e0af4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java @@ -57,7 +57,9 @@ public TransportService createTransportService( @Nullable ClusterSettings clusterSettings, Set taskHeaders ) { - StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ClusterConnectionManager(settings, this)); + final StubbableConnectionManager connectionManager = new StubbableConnectionManager( + new ClusterConnectionManager(settings, this, threadPool.getThreadContext()) + ); connectionManager.setDefaultNodeConnectedBehavior((cm, node) -> false); connectionManager.setDefaultGetConnectionBehavior((cm, discoveryNode) -> createConnection(discoveryNode)); return new 
TransportService( diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index fce2ee039d096..624613304a6e0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -232,7 +232,7 @@ private MockTransportService( localNodeFactory, clusterSettings, taskHeaders, - new StubbableConnectionManager(new ClusterConnectionManager(settings, transport)) + new StubbableConnectionManager(new ClusterConnectionManager(settings, transport, threadPool.getThreadContext())) ); this.original = transport.getDelegate(); } diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java index dc6ba1ff3d152..ca18d12939a7e 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java @@ -135,7 +135,7 @@ private TransportSamlInitiateSingleSignOnAction setupTransportAction(boolean wit final TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java index 7e6a7e58aeb67..0070dcff1950d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java @@ -190,7 +190,7 @@ public void setup() throws Exception { final TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java index d2aaa56d98fc4..1dd1b1ffd7719 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; @@ -45,7 +46,7 @@ public void testReservedRole() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, 
+ mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, (x) -> null, null, @@ -82,7 +83,7 @@ public void testValidRole() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, (x) -> null, null, @@ -130,7 +131,7 @@ public void testException() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, (x) -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java index 88365e24581e6..e5b776057d210 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.role.GetRolesRequest; @@ -50,7 +51,7 @@ public void testReservedRoles() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -110,7 +111,7 @@ public void testStoreRoles() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -176,7 +177,7 @@ public void testGetAllOrMix() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -255,7 +256,7 @@ public void testException() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java index 6bbf5cc870660..9d884a0045778 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.join.query.HasParentQueryBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -87,7 +88,7 @@ public void testReservedRole() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), 
TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -129,7 +130,7 @@ public void testValidRole() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -182,7 +183,7 @@ public void testException() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -232,7 +233,7 @@ public void testCreationOfRoleWithMalformedQueryJsonFails() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -287,7 +288,7 @@ public void testCreationOfRoleWithUnsupportedQueryFails() throws Exception { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java index cfe056c80f919..6e8698f095d32 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; @@ -51,7 +52,7 @@ public void setupMocks() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java index 13fb17bddeca9..6f789a10a3a6c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; @@ -48,7 +49,7 @@ public void setupMocks() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 3079cb450ee41..0ba33bb720dfb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -261,7 +261,7 @@ protected void final TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java index 42f267017b5a0..ee0deeabc7bb2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -221,7 +221,7 @@ public void setup() throws Exception { final TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java index e77f8cb83b256..9089501802636 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.SecurityContext; @@ -62,7 +63,7 @@ public void testInternalUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -99,7 +100,7 @@ public void testNullUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -256,7 +257,7 @@ private TransportAuthenticateAction prepareAction(AnonymousUser anonymousUser, U TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java index 
da0eefb752177..ebd75c28fddea 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackSettings; @@ -61,7 +62,7 @@ public void testAnonymousUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -105,7 +106,7 @@ public void testInternalUsers() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -167,7 +168,7 @@ public void testValidUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -212,7 +213,7 @@ public void testIncorrectPasswordHashingAlgorithm() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -266,7 +267,7 @@ public void testException() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java index 8c2f964152ec1..1511c906ca1df 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequest; @@ -51,7 +52,7 @@ public void testAnonymousUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -86,7 +87,7 @@ public void testInternalUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -134,7 +135,7 @@ public void testReservedUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -175,7 
+176,7 @@ public void testValidUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -226,7 +227,7 @@ public void testException() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java index ed6de0ca20943..bbbafb1f83e0e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java @@ -94,7 +94,7 @@ public void testAnonymousUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -141,7 +141,7 @@ public void testInternalUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -207,7 +207,7 @@ public void testReservedUsersOnly() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -268,7 +268,7 @@ public void testGetAllUsers() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -329,7 +329,7 @@ public void testGetStoreOnlyUsers() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -393,7 +393,7 @@ public void testException() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java index fb4def7261459..69c7a7be85ad1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java @@ -64,7 +64,7 @@ public void testAnonymousUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -100,7 +100,7 @@ public void testSystemUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -157,7 +157,7 @@ public void 
testReservedUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -193,7 +193,7 @@ public void testValidUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -243,7 +243,7 @@ public void testInvalidUser() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -271,7 +271,7 @@ public void testException() { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + mock(ThreadPool.class), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java index 30159e18b5057..2d94e6c37325c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java @@ -70,7 +70,7 @@ public void testAnonymousUser() throws Exception { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -124,7 +124,7 @@ public void testInternalUser() throws Exception { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -198,7 +198,7 @@ public void testValidUser() throws Exception { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -267,7 +267,7 @@ public void testException() throws Exception { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, @@ -326,7 +326,7 @@ public void testUserModifyingThemselves() throws Exception { TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), - null, + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, From 86174d6add50959e166b6100d0bfea667b5fa685 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 26 Jan 2022 12:59:45 -0800 Subject: [PATCH 056/100] Use GitHub issue template forms (#83177) Currently we use GitHub issue templates containing markdown for each template. This was an improvement over the single issue template that preceded it, and the templates ask for several useful bits of information, but filling them out is cumbersome because they are entirely markdown, and users often leave in the markdown comments that were meant only for them to read, so those comments end up in the resulting issue. This commit converts the existing issue templates to use the new GitHub issue template forms. These are configured through YAML and present an HTML form to the user that is customized for each issue type.
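Because the new forms are plain YAML, they can also be checked mechanically before they are pushed. The sketch below is a hypothetical lint step, not part of this patch: it assumes SnakeYAML on the classpath, and the class name, the path argument, and the rule that every `body` element declares a widget `type` are invented for illustration only.

import org.yaml.snakeyaml.Yaml;

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;

public class IssueFormLint {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws Exception {
        // e.g. java IssueFormLint .github/ISSUE_TEMPLATE/bug.yml
        try (InputStream in = Files.newInputStream(Path.of(args[0]))) {
            Map<String, Object> form = new Yaml().load(in);
            if (form.get("name") == null || form.get("description") == null || form.get("body") == null) {
                throw new IllegalStateException("issue form needs a name, a description, and a body");
            }
            List<Map<String, Object>> body = (List<Map<String, Object>>) form.get("body");
            for (Map<String, Object> element : body) {
                // each body element declares a widget type: markdown, input, textarea, dropdown, ...
                if (element.get("type") == null) {
                    throw new IllegalStateException("body element without a type: " + element);
                }
            }
            System.out.println("ok: " + body.size() + " body elements");
        }
    }
}

GitHub applies its own schema validation when it renders a form, so a local check like this would only serve as an early safeguard.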
--- .github/ISSUE_TEMPLATE/bug.md | 44 -------------- .github/ISSUE_TEMPLATE/bug.yml | 70 ++++++++++++++++++++++ .github/ISSUE_TEMPLATE/feature-request.md | 21 ------- .github/ISSUE_TEMPLATE/feature-request.yml | 19 ++++++ .github/ISSUE_TEMPLATE/test-failure.md | 28 --------- .github/ISSUE_TEMPLATE/test-failure.yml | 52 ++++++++++++++++ 6 files changed, 141 insertions(+), 93 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug.md create mode 100644 .github/ISSUE_TEMPLATE/bug.yml delete mode 100644 .github/ISSUE_TEMPLATE/feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/feature-request.yml delete mode 100644 .github/ISSUE_TEMPLATE/test-failure.md create mode 100644 .github/ISSUE_TEMPLATE/test-failure.yml diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md deleted file mode 100644 index 0122f08753705..0000000000000 --- a/.github/ISSUE_TEMPLATE/bug.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -name: Bug -about: "Report a confirmed bug. For unconfirmed bugs please - visit https://discuss.elastic.co/c/elasticsearch" -labels: ">bug,needs:triage" - ---- - - -**Elasticsearch version** (`bin/elasticsearch --version`): - -**Plugins installed**: [] - -**JVM version** (`java -version`): - -**OS version** (`uname -a` if on a Unix-like system): - -**Description of the problem including expected versus actual behavior**: - -**Steps to reproduce**: - -Please include a *minimal* but *complete* recreation of the problem, -including (e.g.) index creation, mappings, settings, query etc. The easier -you make for us to reproduce it, the more likely that somebody will take the -time to look at it. - - 1. - 2. - 3. - -**Provide logs (if relevant)**: - - diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 0000000000000..0bae5de1d0f8a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,70 @@ +name: Bug +description: "Report a confirmed bug. For unconfirmed bugs please visit https://discuss.elastic.co/c/elasticsearch" +labels: [">bug", "needs:triage"] +body: + - type: markdown + attributes: + value: | + GitHub is reserved for bug reports and feature requests; it is + not the place for general questions. If you have a question or + an unconfirmed bug, please visit the [forums](https://discuss.elastic.co/c/elasticsearch). + Please also check your OS is [supported](https://www.elastic.co/support/matrix#show_os). + If it is not, the issue is likely to be closed. + + For security vulnerabilities please only send reports to security@elastic.co. + See https://www.elastic.co/community/security for more information.
+ + Please fill in the following details to help us reproduce the bug: + - type: input + id: es_version + attributes: + label: Elasticsearch Version + description: The version of Elasticsearch you are running, found with `bin/elasticsearch --version` + validations: + required: true + - type: input + id: plugins + attributes: + label: Installed Plugins + description: Comma-separated list of plugins that you have installed + validations: + required: false + - type: input + id: java_version + attributes: + label: Java Version + description: The version of Java Elasticsearch is running with, found with `java -version` + value: _bundled_ + validations: + required: true + - type: input + id: os_version + attributes: + label: OS Version + description: The version of your operating system, found with `uname -a` if on a Unix-like system + validations: + required: true + - type: textarea + id: problem + attributes: + label: Problem Description + description: A description of the problem including expected versus actual behavior + validations: + required: true + - type: textarea + id: steps + attributes: + label: Steps to Reproduce + description: | + A minimal but complete recreation of the problem including (e.g.) index + creation, mappings, settings, query etc. The easier you make it for us to + reproduce, the more likely that somebody will take the time to look at it. + validations: + required: true + - type: textarea + id: logs + attributes: + label: Logs (if relevant) + description: Any relevant log messages or snippets + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md deleted file mode 100644 index 3e280107dbd72..0000000000000 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: Feature Request -about: Request a new feature we haven't thought of -labels: ">enhancement,needs:triage" - ---- - - - diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 0000000000000..10385d018e434 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,19 @@ +name: Feature Request +description: Request a new feature we haven't thought of +labels: [">enhancement", "needs:triage"] +body: + - type: markdown + attributes: + value: | + Please first search existing issues for the feature you are requesting; + it may already exist, even as a closed issue. + - type: textarea + id: description + attributes: + label: Description + description: | + Please give us as much context as possible about the feature. For example, + you could include a story about a time when you wanted to use the feature, + and also tell us what you had to do instead. The last part is helpful + because it gives us an idea of how much harder your life is without the + feature.
diff --git a/.github/ISSUE_TEMPLATE/test-failure.md b/.github/ISSUE_TEMPLATE/test-failure.md deleted file mode 100644 index 8ec95a7886af8..0000000000000 --- a/.github/ISSUE_TEMPLATE/test-failure.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -name: Test Failure -about: A test failure in CI -labels: ">test-failure" - ---- - - - -**Build scan**: - -**Repro line**: - -**Reproduces locally?**: - -**Applicable branches**: - -**Failure history**: - -**Failure excerpt**: - - diff --git a/.github/ISSUE_TEMPLATE/test-failure.yml b/.github/ISSUE_TEMPLATE/test-failure.yml new file mode 100644 index 0000000000000..710ef86908005 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/test-failure.yml @@ -0,0 +1,52 @@ +name: Test Failure +description: A test failure in CI +labels: [">test-failure"] +body: + - type: markdown + attributes: + value: Please fill out the following information, and ensure you have attempted to reproduce locally + - type: input + id: link + attributes: + label: CI Link + description: A link to a Gradle buildscan (preferred) or Jenkins job with the failure + validations: + required: true + - type: input + id: repro_line + attributes: + label: Repro line + description: The reproduce line from the build output + validations: + required: true + - type: dropdown + id: reproducible + attributes: + label: Does it reproduce? + options: + - "Yes" + - "No" + - "Didn't try" + validations: + required: true + - type: input + id: branches + attributes: + label: Applicable branches + description: Branch names this failure has occurred on + validations: + required: true + - type: input + id: history + attributes: + label: Failure history + description: Link to build stats and possible indication of when this started failing and how often + validations: + required: false + - type: textarea + id: excerpt + attributes: + label: Failure excerpt + description: The error or exception that occurs + validations: + required: true From cb6265f9bd72d743967effc58e26a7b95bb0610f Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Wed, 26 Jan 2022 17:13:39 -0500 Subject: [PATCH 057/100] [DOCS] Move snapshot repo types to separate pages (#82826) With https://github.com/elastic/elasticsearch/pull/81870, the Azure, GCS, and S3 repository types have separate, dedicated pages in the Elasticsearch guide. For consistency, this PR creates separate pages for the shared file system, read-only URL, and source-only repository types. Related changes: - Adds redirects to the plugins docs - Fixes a few breaking-changes docs entries that refer to the Azure, GCS, and S3 repositories as plugins.
Co-authored-by: Adam Locke --- docs/plugins/redirects.asciidoc | 74 +++++++ .../cluster-node-setting-changes.asciidoc | 9 +- .../migrate_8_0/rest-api-changes.asciidoc | 9 +- .../apis/put-repo-api.asciidoc | 183 ++---------------- .../on-prem-repo-type.asciidoc | 2 + .../register-repository.asciidoc | 142 +++----------- .../repository-read-only-url.asciidoc | 76 ++++++++ .../repository-shared-file-system.asciidoc | 49 +++++ .../repository-shared-settings.asciidoc | 32 ++- .../repository-source-only.asciidoc | 78 ++++++++ 10 files changed, 358 insertions(+), 296 deletions(-) create mode 100644 docs/reference/snapshot-restore/on-prem-repo-type.asciidoc create mode 100644 docs/reference/snapshot-restore/repository-read-only-url.asciidoc create mode 100644 docs/reference/snapshot-restore/repository-shared-file-system.asciidoc create mode 100644 docs/reference/snapshot-restore/repository-source-only.asciidoc diff --git a/docs/plugins/redirects.asciidoc b/docs/plugins/redirects.asciidoc index c691ac4885589..96b432e421c65 100644 --- a/docs/plugins/redirects.asciidoc +++ b/docs/plugins/redirects.asciidoc @@ -71,3 +71,77 @@ See {ref}/monitor-elasticsearch-cluster.html[{stack} monitoring]. === Security plugins See {ref}/secure-cluster.html[{stack} security]. + +[role="exclude",id="repository-azure"] +=== Azure repository plugin + +// tag::azure-repo-migration[] +The Azure repository plugin is now included in {es}. +See {ref}/repository-azure.html[Azure repository]. +// end::azure-repo-migration[] + +[role="exclude",id="repository-azure-usage"] +=== Azure repository plugin + +include::redirects.asciidoc[tag=azure-repo-migration] + +[role="exclude",id="repository-azure-client-settings"] +=== Azure repository plugin + +include::redirects.asciidoc[tag=azure-repo-migration] + +[role="exclude",id="repository-azure-repository-settings"] +=== Azure repository plugin + +include::redirects.asciidoc[tag=azure-repo-migration] + +[role="exclude",id="repository-azure-validation"] +=== Azure repository plugin + +include::redirects.asciidoc[tag=azure-repo-migration] + +[role="exclude",id="repository-s3"] +=== S3 repository plugin + +// tag::s3-repo-migration[] +The S3 repository plugin is now included in {es}. +See {ref}/repository-s3.html[S3 repository]. +// end::s3-repo-migration[] + +[role="exclude",id="repository-s3-usage"] +=== S3 repository plugin + +include::redirects.asciidoc[tag=s3-repo-migration] + +[role="exclude",id="repository-s3-client"] +=== S3 repository plugin + +include::redirects.asciidoc[tag=s3-repo-migration] + +[role="exclude",id="repository-s3-repository"] +=== S3 repository plugin + +include::redirects.asciidoc[tag=s3-repo-migration] + +[role="exclude",id="repository-gcs"] +=== Google Cloud Storage repository plugin + +// tag::gcs-repo-migration[] +The Google Cloud Storage repository plugin is now included in {es}. +See {ref}/repository-gcs.html[Google Cloud Storage repository]. 
+// end::gcs-repo-migration[] + +[role="exclude",id="repository-gcs-usage"] +=== Google Cloud Storage repository plugin + +include::redirects.asciidoc[tag=gcs-repo-migration] + +[role="exclude",id="repository-gcs-client"] +=== Google Cloud Storage repository plugin + +include::redirects.asciidoc[tag=gcs-repo-migration] + +[role="exclude",id="repository-gcs-repository"] +=== Google Cloud Storage repository plugin + +include::redirects.asciidoc[tag=gcs-repo-migration] diff --git a/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc b/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc index 1889290278f11..6fd3fc57171d8 100644 --- a/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc +++ b/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc @@ -875,11 +875,10 @@ previously be used to enable compression for all shared file system repositories The `repositories.fs.compress` setting has been removed. *Impact* + -Use the repository specific `compress` setting to enable compression. See -{ref}/snapshots-register-repository.html[Register a snapshot repository] for -information on the `compress` setting. - -Discontinue use of the `repositories.fs.compress` node-level setting. +Discontinue use of the `repositories.fs.compress` node-level setting. Use the +repository-specific `compress` setting to enable compression instead. Refer to +{ref}/snapshots-filesystem-repository.html#filesystem-repository-settings[Shared +file system repository settings]. ==== //end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/rest-api-changes.asciidoc b/docs/reference/migration/migrate_8_0/rest-api-changes.asciidoc index 988521ca4ee70..11eefa04d3429 100644 --- a/docs/reference/migration/migrate_8_0/rest-api-changes.asciidoc +++ b/docs/reference/migration/migrate_8_0/rest-api-changes.asciidoc @@ -991,21 +991,18 @@ Previously, the default value for `compress` was `false`. The default has been c This change will affect both newly created repositories and existing repositories where `compress=false` has not been explicitly specified. -For more information on the compress option, see -{ref}/snapshots-register-repository.html[Register a snapshot repository]. - *Impact* + Update your workflow and applications to assume a default value of `true` for the `compress` parameter. ==== -.The S3 repository plugin now uses a DNS-style access pattern by default. +.S3 snapshot repositories now use a DNS-style access pattern by default. [%collapsible] ==== *Details* + -Starting in version 7.4 the `repository-s3` plugin does not use the +Starting in version 7.4, `s3` snapshot repositories no longer use the now-deprecated path-style access pattern by default. In versions 7.0, 7.1, 7.2 -and 7.3 the `repository-s3` plugin always used the path-style access pattern. +and 7.3 `s3` snapshot repositories always used the path-style access pattern. This is a breaking change for deployments that only support path-style access but which are recognized as supporting DNS-style access by the AWS SDK. 
This breaking change was made necessary by diff --git a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc index b18a045d34c84..0b77795540a14 100644 --- a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc @@ -75,183 +75,42 @@ You can manually perform this verification using the [[put-snapshot-repo-api-request-type]] `type`:: -+ --- (Required, string) Repository type. - ++ .Valid values for `type` [%collapsible%open] ==== -`fs`:: -Shared file system repository. Repositories of this type use a shared file -system to store snapshots. This file system must accessible to all master and -data nodes in the cluster. -+ -IMPORTANT: To register a shared file system repository, you must mount the same -shared filesystem to the same location on all master and data nodes. This -location must be registered in the `path.repo` setting on all master and data -nodes in the cluster. -+ -See <>. - -[xpack]#`source`#:: -Source-only repository. You can use source-only repositories to create minimal, -source-only snapshots that take up to 50% less space on disk. -+ -Source-only snapshots are only supported if the <> is enabled and no -<> is applied. -+ -WARNING: Source-only snapshots contain stored fields and index metadata. They do -not include index or doc values structures and are not searchable when restored. -After restoring a source-only snapshot, you must <> the -data into a new index. -+ -See <>. -`url`:: -URL repository. Repositories of this type are read-only -for the cluster. This means the cluster can retrieve or restore snapshots from -the repository but cannot write or create snapshots in it. -+ -You can use URL repositories as an alternative way to give a cluster read-only -access to a shared file system (`fs`) repository. -+ -See <>. -==== +`azure`:: <> +`gcs`:: <> +`s3`:: <> +`fs`:: <> +`source`:: <> +`url`:: <> -More repository types are available through these official -plugins: +Other repository types are available through official plugins: -* <> for S3 repository support -* {plugins}/repository-hdfs.html[repository-hdfs] for HDFS repository support in - Hadoop environments -* <> for Azure storage repositories -* <> for Google Cloud Storage repositories --- +`hdfs`:: {plugins}/repository-hdfs.html[Hadoop Distributed File System (HDFS) repository] ==== [[put-snapshot-repo-api-settings-param]] `settings`:: -+ --- (Required, object) -Contains settings for the repository. - -The following `settings` properties are valid for all repository types: - -.Properties of `settings` -[%collapsible%open] -==== -`chunk_size`:: -(Optional, <>) -Maximum size of files in snapshots. In snapshots, files larger than this are -broken down into chunks of this size or smaller. Defaults to `null` (unlimited -file size). - -`compress`:: -(Optional, Boolean) -If `true`, metadata files, such as index mappings and settings, are compressed -in snapshots. Data files are not compressed. Defaults to `true`. - -`max_number_of_snapshots`:: -(Optional, integer) -Maximum number of snapshots the repository can contain. -Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. - -`max_restore_bytes_per_sec`:: -(Optional, <>) -Maximum snapshot restore rate per node. Defaults to unlimited. Note -that restores are also throttled through <>. - -`max_snapshot_bytes_per_sec`:: -(Optional, <>) -Maximum snapshot creation rate per node. Defaults to `40mb` per second.
- -`readonly`:: -(Optional, Boolean) -If `true`, the repository is read-only. The cluster can retrieve and restore -snapshots from the repository but not write to the repository or create -snapshots in it. -+ -If `false`, the cluster can write to the repository and create snapshots in it. -Defaults to `false`. -+ -[TIP] -===== -If you register the same snapshot repository with multiple clusters, only -one cluster should have write access to the repository. Having multiple clusters -write to the repository at the same time risks corrupting the contents of the -repository. - -Only a cluster with write access can create snapshots in the repository. All -other clusters connected to the repository should have the `readonly` parameter -set to `true`. This means those clusters can retrieve or restore snapshots from -the repository but not create snapshots in it. -===== -==== - -Other accepted `settings` properties depend on the repository type, set using the -<> parameter. - -.Valid `settings` properties for `fs` repositories -[%collapsible%open] -==== -`location`:: -(Required, string) -Location of the shared filesystem used to store and retrieve snapshots. This -location must be registered in the `path.repo` setting on all master and data -nodes in the cluster. -==== - -.Valid `settings` properties for `source` repositories -[%collapsible%open] -==== -`delegate_type`:: -(Optional, string) -Delegated repository type. For valid values, see the -<>. -+ -`source` repositories can use `settings` properties for its delegated repository -type. See <>. - -==== - -.Valid `settings` properties for `url` repositories -[%collapsible%open] -==== -`url`:: +Settings for the repository. Supported settings vary based on the repository +type: + ---- -(Required, string) -URL location of the root of the shared filesystem repository. The following -protocols are supported: - -* `file` -* `ftp` -* `http` -* `https` -* `jar` - -URLs using the `http`, `https`, or `ftp` protocols must be explicitly allowed -with the <> cluster -setting. This setting supports wildcards in the place of a host, path, query, or -fragment in the URL. - -URLs using the `file` protocol must point to the location of a shared filesystem -accessible to all master and data nodes in the cluster. This location must be -registered in the `path.repo` setting. You don't need to register URLs using the -`ftp`, `http`, `https`, or `jar` protocols in the `path.repo` setting. ---- +-- +* <> +* <> +* <> +* <> +* <> +* <> -`http_max_retries`:: -(Optional, integer) Maximum number of retries for `http` and `https` URLs. -Defaults to `5`. +Other repository types are available through official plugins: -`http_socket_timeout`:: -(Optional, <>) Maximum wait time for data transfers over -a connection. Defaults to `50s`. -==== +* {plugins}/repository-hdfs.html[Hadoop Distributed File System (HDFS) repository] -- `verify`:: diff --git a/docs/reference/snapshot-restore/on-prem-repo-type.asciidoc b/docs/reference/snapshot-restore/on-prem-repo-type.asciidoc new file mode 100644 index 0000000000000..00859394abb99 --- /dev/null +++ b/docs/reference/snapshot-restore/on-prem-repo-type.asciidoc @@ -0,0 +1,2 @@ +NOTE: This repository type is only available if you run {es} on your own +hardware. If you use {ess}, see <>. 
diff --git a/docs/reference/snapshot-restore/register-repository.asciidoc b/docs/reference/snapshot-restore/register-repository.asciidoc
index 05883837744d8..c169454f0794c 100644
--- a/docs/reference/snapshot-restore/register-repository.asciidoc
+++ b/docs/reference/snapshot-restore/register-repository.asciidoc
@@ -70,11 +70,17 @@ To manage repositories in {kib}, go to the main menu and click
 **Stack Management** > **Snapshot and Restore** > **Repositories**. To register
 a snapshot repository, click **Register repository**.

+You can also register a repository using the <>.
+
 [discrete]
 [[snapshot-repo-types]]
 === Snapshot repository types

-Supported snapshot repository types vary based on your deployment type.
+Supported snapshot repository types vary based on your deployment type:
+
+* <>
+* <>

 [discrete]
 [[ess-repo-types]]
@@ -94,10 +100,10 @@ clusters].

 {ess} deployments also support the following repository types:

+* {cloud}/ec-azure-snapshotting.html[Azure]
+* {cloud}/ec-gcs-snapshotting.html[Google Cloud Storage]
 * {cloud}/ec-aws-custom-repository.html[AWS S3]
-* {cloud}/ec-gcs-snapshotting.html[Google Cloud Storage (GCS)]
-* {cloud}/ec-azure-snapshotting.html[Microsoft Azure]
-* <>
+* <>

 [discrete]
 [[self-managed-repo-types]]
@@ -106,12 +112,12 @@ clusters].
 If you run {es} on your own hardware, you can use the following built-in
 snapshot repository types:

-* <>
-* <>
 * <>
+* <>
+* <>
 * <>
-* <>
-* <>
+* <>
+* <>

 [[snapshots-repository-plugins]]
 Other repository types are available through official plugins:
@@ -122,108 +128,6 @@ You can also use alternative implementations of these repository types, such as
 MinIO, as long as they're compatible. To verify a repository's compatibility,
 see <>.

-[discrete]
-[[snapshots-filesystem-repository]]
-==== Shared file system repository
-
-// tag::on-prem-repo-type[]
-NOTE: This repository type is only available if you run {es} on your own
-hardware. If you use {ess}, see <>.
-// end::on-prem-repo-type[]
-
-Use a shared file system repository to store snapshots on a
-shared file system.
-
-To register a shared file system repository, first mount the file system to the
-same location on all master and data nodes. Then add the file system's
-path or parent directory to the `path.repo` setting in `elasticsearch.yml` for
-each master and data node. For running clusters, this requires a
-<> of each node.
-
-IMPORTANT: By default, a network file system (NFS) uses user IDs (UIDs) and
-group IDs (GIDs) to match accounts across nodes. If your shared file system is
-an NFS and your nodes don't use the same UIDs and GIDs, update your NFS
-configuration to account for this.
-
-Supported `path.repo` values vary by platform:
-
-include::{es-repo-dir}/tab-widgets/register-fs-repo-widget.asciidoc[]
-
-[discrete]
-[[snapshots-read-only-repository]]
-==== Read-only URL repository
-
-include::register-repository.asciidoc[tag=on-prem-repo-type]
-
-You can use a URL repository to give a cluster read-only access to a shared file
-system. Since URL repositories are always read-only, they're a safer and more
-convenient alternative to registering a read-only shared filesystem repository.
-
-Use {kib} or the <> to
-register a URL repository.
-
-[source,console]
-----
-PUT _snapshot/my_read_only_url_repository
-{
-  "type": "url",
-  "settings": {
-    "url": "file:/mount/backups/my_fs_backup_location"
-  }
-}
-----
-// TEST[skip:no access to url file path]
-
-[discrete]
-[[snapshots-source-only-repository]]
-==== Source-only repository
-
-You can use a source-only repository to take minimal, source-only snapshots that
-use up to 50% less disk space than regular snapshots.
-
-Unlike other repository types, a source-only repository doesn't directly store
-snapshots. It delegates storage to another registered snapshot repository.
-
-When you take a snapshot using a source-only repository, {es} creates a
-source-only snapshot in the delegated storage repository. This snapshot only
-contains stored fields and metadata. It doesn't include index or doc values
-structures and isn't immediately searchable when restored. To search the
-restored data, you first have to <> it into a new data
-stream or index.
-
-[IMPORTANT]
-==================================================
-
-Source-only snapshots are only supported if the `_source` field is enabled and no source-filtering is applied.
-When you restore a source-only snapshot:
-
- * The restored index is read-only and can only serve `match_all` search or scroll requests to enable reindexing.
-
- * Queries other than `match_all` and `_get` requests are not supported.
-
- * The mapping of the restored index is empty, but the original mapping is available from the types top
-   level `meta` element.
-
-==================================================
-
-Before registering a source-only repository, use {kib} or the
-<> to register a snapshot
-repository of another type to use for storage. Then register the source-only
-repository and specify the delegated storage repository in the request.
-
-[source,console]
-----
-PUT _snapshot/my_src_only_repository
-{
-  "type": "source",
-  "settings": {
-    "delegate_type": "fs",
-    "location": "my_backup_location"
-  }
-}
-----
-// TEST[continued]
-
 [discrete]
 [[snapshots-repository-verification]]
 === Verify a repository
@@ -245,7 +149,8 @@ PUT _snapshot/my_unverified_backup?verify=false
   }
 }
 ----
-// TEST[continued]
+// TEST[setup:setup-repository]
+// TEST[s/my_unverified_backup_location/my_repository/]

 If wanted, you can manually run the repository verification check. To verify a
 repository in {kib}, go to the **Repositories** list page and click the name of
 a repository. Then click **Verify repository**. You can also use the
 POST _snapshot/my_unverified_backup/_verify
 ----
 // TEST[continued]
+// TEST[s/my_unverified_backup_location/my_repository/]

 If successful, the request returns a list of nodes used to verify the
 repository. If verification fails, the request returns an error.
@@ -285,7 +191,7 @@ API>>.
 ----
 POST _snapshot/my_repository/_cleanup
 ----
-// TEST[continued]
+// TEST[setup:setup-snapshots]

 The API returns:
@@ -298,6 +204,8 @@ The API returns:
   }
 }
 ----
+// TESTRESPONSE[s/"deleted_bytes": 20/"deleted_bytes": $body.results.deleted_bytes/]
+// TESTRESPONSE[s/"deleted_blobs": 5/"deleted_blobs": $body.results.deleted_blobs/]

 Depending on the concrete repository implementation the numbers shown for bytes free as well as the number of
 blobs removed will either be an approximation or an exact result. Any non-zero value for the number of blobs removed implies that unreferenced blobs were found and
@@ -336,9 +244,9 @@ with {es} until the repository contents are fully restored.
 If you alter the contents of a repository while it is registered with {es} then
 the repository may become unreadable or may silently lose some of its contents.
-
-include::repository-s3.asciidoc[]
-
-include::repository-gcs.asciidoc[]
-
 include::repository-azure.asciidoc[]
+include::repository-gcs.asciidoc[]
+include::repository-s3.asciidoc[]
+include::repository-shared-file-system.asciidoc[]
+include::repository-read-only-url.asciidoc[]
+include::repository-source-only.asciidoc[]

diff --git a/docs/reference/snapshot-restore/repository-read-only-url.asciidoc b/docs/reference/snapshot-restore/repository-read-only-url.asciidoc
new file mode 100644
index 0000000000000..8f9cb7e198f81
--- /dev/null
+++ b/docs/reference/snapshot-restore/repository-read-only-url.asciidoc
@@ -0,0 +1,76 @@
+[[snapshots-read-only-repository]]
+=== Read-only URL repository
+
+include::{es-repo-dir}/snapshot-restore/on-prem-repo-type.asciidoc[]
+
+You can use a URL repository to give a cluster read-only access to a shared file
+system. Since URL repositories are always read-only, they're a safer and more
+convenient alternative to registering a read-only shared filesystem repository.
+
+Use {kib} or the <> to
+register a URL repository.
+
+[source,console]
+----
+PUT _snapshot/my_read_only_url_repository
+{
+  "type": "url",
+  "settings": {
+    "url": "file:/mount/backups/my_fs_backup_location"
+  }
+}
+----
+// TEST[skip:no access to url file path]
+
+[[read-only-url-repository-settings]]
+==== Repository settings
+
+`chunk_size`::
+(Optional, <>)
+Maximum size of files in snapshots. In snapshots, files larger than this are
+broken down into chunks of this size or smaller. Defaults to `null` (unlimited
+file size).
+
+`http_max_retries`::
+(Optional, integer) Maximum number of retries for `http` and `https` URLs.
+Defaults to `5`.
+
+`http_socket_timeout`::
+(Optional, <>) Maximum wait time for data transfers over
+a connection. Defaults to `50s`.
+
+`compress`::
+(Optional, Boolean)
+If `true`, metadata files, such as index mappings and settings, are compressed
+in snapshots. Data files are not compressed. Defaults to `true`.
+
+`max_number_of_snapshots`::
+(Optional, integer)
+Maximum number of snapshots the repository can contain.
+Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`.
+
+include::repository-shared-settings.asciidoc[tags=!readonly-repo-setting]
+
+`url`::
++
+--
+(Required, string)
+URL location of the root of the shared filesystem repository. The following
+protocols are supported:
+
+* `file`
+* `ftp`
+* `http`
+* `https`
+* `jar`
+
+URLs using the `http`, `https`, or `ftp` protocols must be explicitly allowed
+with the <> cluster
+setting. This setting supports wildcards in the place of a host, path, query, or
+fragment in the URL.
+
+URLs using the `file` protocol must point to the location of a shared filesystem
+accessible to all master and data nodes in the cluster. This location must be
+registered in the `path.repo` setting. You don't need to register URLs using the
+`ftp`, `http`, `https`, or `jar` protocols in the `path.repo` setting.
+--
\ No newline at end of file

diff --git a/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc b/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc
new file mode 100644
index 0000000000000..658dc1b5e698f
--- /dev/null
+++ b/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc
@@ -0,0 +1,49 @@
+[[snapshots-filesystem-repository]]
+=== Shared file system repository
+
+include::{es-repo-dir}/snapshot-restore/on-prem-repo-type.asciidoc[]
+
+Use a shared file system repository to store snapshots on a
+shared file system.
+
+To register a shared file system repository, first mount the file system to the
+same location on all master and data nodes. Then add the file system's
+path or parent directory to the `path.repo` setting in `elasticsearch.yml` for
+each master and data node. For running clusters, this requires a
+<> of each node.
+
+IMPORTANT: By default, a network file system (NFS) uses user IDs (UIDs) and
+group IDs (GIDs) to match accounts across nodes. If your shared file system is
+an NFS and your nodes don't use the same UIDs and GIDs, update your NFS
+configuration to account for this.
+
+Supported `path.repo` values vary by platform:
+
+include::{es-repo-dir}/tab-widgets/register-fs-repo-widget.asciidoc[]
+
+[[filesystem-repository-settings]]
+==== Repository settings
+
+`chunk_size`::
+(Optional, <>)
+Maximum size of files in snapshots. In snapshots, files larger than this are
+broken down into chunks of this size or smaller. Defaults to `null` (unlimited
+file size).
+
+`compress`::
+(Optional, Boolean)
+If `true`, metadata files, such as index mappings and settings, are compressed
+in snapshots. Data files are not compressed. Defaults to `true`.
+
+`location`::
+(Required, string)
+Location of the shared filesystem used to store and retrieve snapshots. This
+location must be registered in the `path.repo` setting on all master and data
+nodes in the cluster.
+
+`max_number_of_snapshots`::
+(Optional, integer)
+Maximum number of snapshots the repository can contain.
+Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`.
+
+include::repository-shared-settings.asciidoc[]

diff --git a/docs/reference/snapshot-restore/repository-shared-settings.asciidoc b/docs/reference/snapshot-restore/repository-shared-settings.asciidoc
index 2a4753abee45b..e2f79bb11cb23 100644
--- a/docs/reference/snapshot-restore/repository-shared-settings.asciidoc
+++ b/docs/reference/snapshot-restore/repository-shared-settings.asciidoc
@@ -1,12 +1,32 @@
 `max_restore_bytes_per_sec`::
-
- Throttles per node restore rate. Defaults to unlimited.
- Note that restores are also throttled through {ref}/recovery.html[recovery settings].
+(Optional, <>)
+Maximum snapshot restore rate per node. Defaults to unlimited. Note
+that restores are also throttled through <>.

 `max_snapshot_bytes_per_sec`::
+(Optional, <>)
+Maximum snapshot creation rate per node. Defaults to `40mb` per second.

- Throttles per node snapshot rate. Defaults to `40mb` per second.
-
+//tag::readonly-repo-setting[]
 `readonly`::
+(Optional, Boolean)
+If `true`, the repository is read-only. The cluster can retrieve and restore
+snapshots from the repository but not write to the repository or create
+snapshots in it.
++
+Only a cluster with write access can create snapshots in the repository. All
+other clusters connected to the repository should have the `readonly` parameter
+set to `true`.
++
+If `false`, the cluster can write to the repository and create snapshots in it.
+Defaults to `false`.
++
+[IMPORTANT]
+=====
+If you register the same snapshot repository with multiple clusters, only
+one cluster should have write access to the repository. Having multiple clusters
+write to the repository at the same time risks corrupting the contents of the
+repository.

- Makes repository read-only. Defaults to `false`.
+=====
+//end::readonly-repo-setting[]

diff --git a/docs/reference/snapshot-restore/repository-source-only.asciidoc b/docs/reference/snapshot-restore/repository-source-only.asciidoc
new file mode 100644
index 0000000000000..07ddedd197931
--- /dev/null
+++ b/docs/reference/snapshot-restore/repository-source-only.asciidoc
@@ -0,0 +1,78 @@
+[[snapshots-source-only-repository]]
+=== Source-only repository
+
+You can use a source-only repository to take minimal, source-only snapshots that
+use up to 50% less disk space than regular snapshots.
+
+Unlike other repository types, a source-only repository doesn't directly store
+snapshots. It delegates storage to another registered snapshot repository.
+
+When you take a snapshot using a source-only repository, {es} creates a
+source-only snapshot in the delegated storage repository. This snapshot only
+contains stored fields and metadata. It doesn't include index or doc values
+structures and isn't immediately searchable when restored. To search the
+restored data, you first have to <> it into a new data
+stream or index.
+
+[IMPORTANT]
+==================================================
+
+Source-only snapshots are only supported if the `_source` field is enabled and no source-filtering is applied.
+When you restore a source-only snapshot:
+
+ * The restored index is read-only and can only serve `match_all` search or scroll requests to enable reindexing.
+
+ * Queries other than `match_all` and `_get` requests are not supported.
+
+ * The mapping of the restored index is empty, but the original mapping is available from the type's top
+   level `meta` element.
+
+==================================================
+
+Before registering a source-only repository, use {kib} or the
+<> to register a snapshot
+repository of another type to use for storage. Then register the source-only
+repository and specify the delegated storage repository in the request.
+
+[source,console]
+----
+PUT _snapshot/my_src_only_repository
+{
+  "type": "source",
+  "settings": {
+    "delegate_type": "fs",
+    "location": "my_backup_repository"
+  }
+}
+----
+// TEST[setup:setup-repository]
+// TEST[s/my_backup_repository/my_repository/]
+
+[[source-only-repository-settings]]
+==== Repository settings
+
+`chunk_size`::
+(Optional, <>)
+Maximum size of files in snapshots. In snapshots, files larger than this are
+broken down into chunks of this size or smaller. Defaults to `null` (unlimited
+file size).
+
+`compress`::
+(Optional, Boolean)
+If `true`, metadata files, such as index mappings and settings, are compressed
+in snapshots. Data files are not compressed. Defaults to `true`.
+
+`delegate_type`::
+(Optional, string)
+Delegated repository type. For valid values, see the
+<>.
++
+`source` repositories can use `settings` properties for their delegated repository
+type. See <>.
+
+`max_number_of_snapshots`::
+(Optional, integer)
+Maximum number of snapshots the repository can contain.
+Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`.
+
+include::repository-shared-settings.asciidoc[]

From 6aa5288e60f7c66cc443805de1e266f2d5ec918e Mon Sep 17 00:00:00 2001
From: Brad Deam <54515790+b-deam@users.noreply.github.com>
Date: Thu, 27 Jan 2022 10:20:56 +1030
Subject: [PATCH 058/100] Add missing ingest path param (#82838)

This commit adds the missing 'ingest' param to the REST API spec.
---
 .../src/main/resources/rest-api-spec/api/nodes.stats.json | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json
index 2e19fa1c35dfe..e3ae508b8b1ce 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json
@@ -43,6 +43,7 @@
           "fs",
           "http",
           "indices",
+          "ingest",
           "jvm",
           "os",
           "process",
@@ -69,6 +70,7 @@
           "fs",
           "http",
           "indices",
+          "ingest",
           "jvm",
           "os",
           "process",

From 15de797c4fd4a2eae99679251f58141b65113d85 Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Thu, 27 Jan 2022 13:19:24 +1100
Subject: [PATCH 059/100] Move API Key related classes into apikey package
 (#82047)

API key related classes, including REST actions, requests, responses and
transport actions, are scattered in different places. Some of them are in an
apikey package, but many of them are not. This PR moves all these classes
into the relevant apikey packages.
---
 .../security/QueryApiKeyResponseTests.java | 4 ++--
 .../xpack/core/XPackClientPlugin.java | 6 +++---
 .../security/action/{ => apikey}/ApiKey.java | 2 +-
 .../{ => apikey}/CreateApiKeyAction.java | 2 +-
 .../{ => apikey}/CreateApiKeyRequest.java | 2 +-
 .../CreateApiKeyRequestBuilder.java | 2 +-
 .../{ => apikey}/CreateApiKeyResponse.java | 2 +-
 .../action/{ => apikey}/GetApiKeyAction.java | 2 +-
 .../action/{ => apikey}/GetApiKeyRequest.java | 2 +-
 .../{ => apikey}/GetApiKeyResponse.java | 2 +-
 .../{ => apikey}/GrantApiKeyAction.java | 2 +-
 .../{ => apikey}/GrantApiKeyRequest.java | 3 ++-
 .../{ => apikey}/InvalidateApiKeyAction.java | 2 +-
 .../{ => apikey}/InvalidateApiKeyRequest.java | 2 +-
 .../InvalidateApiKeyResponse.java | 2 +-
 .../action/apikey/QueryApiKeyResponse.java | 1 -
 .../privilege/ClusterPrivilegeResolver.java | 2 +-
 .../ManageOwnApiKeyClusterPrivilege.java | 6 +++---
 .../authz/store/ReservedRolesStore.java | 2 +-
 .../action/{ => apikey}/ApiKeyTests.java | 2 +-
 .../CreateApiKeyRequestBuilderTests.java | 2 +-
 .../CreateApiKeyRequestTests.java | 2 +-
 .../CreateApiKeyResponseTests.java | 2 +-
 .../{ => apikey}/GetApiKeyRequestTests.java | 2 +-
 .../{ => apikey}/GetApiKeyResponseTests.java | 2 +-
 .../InvalidateApiKeyRequestTests.java | 2 +-
 .../InvalidateApiKeyResponseTests.java | 2 +-
 .../apikey/QueryApiKeyResponseTests.java | 2 --
 .../ManageOwnApiKeyClusterPrivilegeTests.java | 4 ++--
 .../authz/store/ReservedRolesStoreTests.java | 10 +++++-----
 .../idp/action/SamlIdentityProviderTests.java | 4 ++--
 .../integration/DlsFlsRequestCacheTests.java | 6 +++---
 .../security/authc/ApiKeyIntegTests.java | 20 +++++++++----------
 .../authc/apikey/ApiKeySingleNodeTests.java | 10 +++++-----
 .../user/AnonymousUserIntegTests.java | 6 +++---
 .../xpack/security/Security.java | 16 +++++++--------
 .../TransportCreateApiKeyAction.java | 8 ++++----
 .../TransportGetApiKeyAction.java | 8 ++++----
 .../TransportGrantApiKeyAction.java | 9 +++++----
 .../TransportInvalidateApiKeyAction.java | 8 ++++----
 .../audit/logfile/LoggingAuditTrail.java | 12 +++++------
 .../xpack/security/authc/ApiKeyService.java | 10 +++++-----
 .../authc/support/ApiKeyGenerator.java | 4 ++--
 .../xpack/security/authz/RBACEngine.java | 4 ++--
 .../InternalEnrollmentTokenGenerator.java | 4 ++--
 .../action/apikey/RestCreateApiKeyAction.java | 4 ++--
 .../action/apikey/RestGetApiKeyAction.java | 6 +++---
 .../action/apikey/RestGrantApiKeyAction.java | 8 ++++----
 .../apikey/RestInvalidateApiKeyAction.java | 6 +++---
 .../TransportGrantApiKeyActionTests.java | 10 +++++-----
 .../audit/logfile/LoggingAuditTrailTests.java | 12 +++++------
 .../security/authc/ApiKeyServiceTests.java | 6 +++---
 .../service/ElasticServiceAccountsTests.java | 12 +++++------
 .../authc/support/ApiKeyGeneratorTests.java | 4 ++--
 .../authz/AuthorizationServiceTests.java | 4 ++--
 .../xpack/security/authz/RBACEngineTests.java | 4 ++--
 ...InternalEnrollmentTokenGeneratorTests.java | 6 +++---
 .../ingest/SetSecurityUserProcessorTests.java | 2 +-
 .../apikey/RestCreateApiKeyActionTests.java | 4 ++--
 .../apikey/RestGetApiKeyActionTests.java | 8 ++++----
 .../RestInvalidateApiKeyActionTests.java | 4 ++--
 61 files changed, 154 insertions(+), 155 deletions(-)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/ApiKey.java (99%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/CreateApiKeyAction.java (91%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/CreateApiKeyRequest.java (99%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/CreateApiKeyRequestBuilder.java (98%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/CreateApiKeyResponse.java (98%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/GetApiKeyAction.java (91%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/GetApiKeyRequest.java (99%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/GetApiKeyResponse.java (97%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/GrantApiKeyAction.java (93%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/GrantApiKeyRequest.java (94%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/InvalidateApiKeyAction.java (92%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/InvalidateApiKeyRequest.java (99%)
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/InvalidateApiKeyResponse.java (98%)
 rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/ApiKeyTests.java (98%)
 rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/CreateApiKeyRequestBuilderTests.java (98%)
 rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/CreateApiKeyRequestTests.java (99%)
 rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/CreateApiKeyResponseTests.java (98%)
 rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/GetApiKeyRequestTests.java (99%)
 rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/GetApiKeyResponseTests.java (98%)
 rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/InvalidateApiKeyRequestTests.java (99%)
 rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/{ => apikey}/InvalidateApiKeyResponseTests.java (98%)
 rename x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/{ => apikey}/TransportCreateApiKeyAction.java (90%)
 rename x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/{ => apikey}/TransportGetApiKeyAction.java (89%)
 rename x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/{ => apikey}/TransportGrantApiKeyAction.java (87%)
 rename x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/{ => apikey}/TransportInvalidateApiKeyAction.java (89%)
 rename x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/{ => apikey}/TransportGrantApiKeyActionTests.java (96%)

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/QueryApiKeyResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/QueryApiKeyResponseTests.java
index 0f771d2c713e6..c4c2abc442c25 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/QueryApiKeyResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/QueryApiKeyResponseTests.java
@@ -74,9 +74,9 @@ private void assertApiKeyInfo(
         assertThat(serverItem.getSortValues(), equalTo(clientApiKeyInfo.getSortValues()));
     }

-    private org.elasticsearch.xpack.core.security.action.ApiKey randomApiKeyInfo() {
+    private org.elasticsearch.xpack.core.security.action.apikey.ApiKey randomApiKeyInfo() {
         final Instant creation = Instant.now();
-        return new org.elasticsearch.xpack.core.security.action.ApiKey(
+        return new org.elasticsearch.xpack.core.security.action.apikey.ApiKey(
             randomAlphaOfLengthBetween(3, 8),
             randomAlphaOfLength(20),
             creation,

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java
similarity index 99%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ApiKey.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java
index 7b65a2e8d9306..8f0993f8d5ad6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ApiKey.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.Version;
 import org.elasticsearch.common.io.stream.StreamInput;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyAction.java
similarity index 91%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyAction.java
index 95d29655d9e38..4af329994355a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyAction.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.action.ActionType;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java
similarity index 99%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequest.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java
index dd36380984f4f..61dd31737aba6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequest;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java
similarity index 98%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequestBuilder.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java
index 7dc0ea61f7ead..cd4cea270de6b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequestBuilder.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java
@@ -4,7 +4,7 @@
  * 2.0; you may not use this file except in compliance with the Elastic License
  * 2.0.
  */
-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.support.WriteRequest;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyResponse.java
similarity index 98%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyResponse.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyResponse.java
index a6953c511892d..d718d09f52b17 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyResponse.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyAction.java
similarity index 91%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyAction.java
index bdf38e0b76a49..a50b78960e5c5 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyAction.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.action.ActionType;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java
similarity index 99%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java
index c9adf9732c9cb..9de4600a1b773 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequest;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponse.java
similarity index 97%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyResponse.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponse.java
index f96831f1a8c01..f92ed3d39d671 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponse.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GrantApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GrantApiKeyAction.java
similarity index 93%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GrantApiKeyAction.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GrantApiKeyAction.java
index b56fc5c49c4ae..1f9f05ca07e49 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GrantApiKeyAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GrantApiKeyAction.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.action.ActionType;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GrantApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GrantApiKeyRequest.java
similarity index 94%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GrantApiKeyRequest.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GrantApiKeyRequest.java
index 3201a706df99a..16a95e349cda8 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GrantApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GrantApiKeyRequest.java
@@ -5,12 +5,13 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.xpack.core.security.action.GrantRequest;

 import java.io.IOException;
 import java.util.Objects;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyAction.java
similarity index 92%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyAction.java
index 0889dd78e62ed..2ca54a3b416b0 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyAction.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.action.ActionType;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java
similarity index 99%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java
index 2ee259ba52d9f..7bf28f30cf0a3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequest;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyResponse.java
similarity index 98%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyResponse.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyResponse.java
index 9f26bdc142584..1568f7037637b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyResponse.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionResponse;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponse.java
index 9385a0cd9ca52..c8771a1604b03 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponse.java
@@ -15,7 +15,6 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xpack.core.security.action.ApiKey;

 import java.io.IOException;
 import java.util.Arrays;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java
index 8f8290aa21dfc..ff614fc09c4bd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java
@@ -28,7 +28,7 @@
 import org.elasticsearch.xpack.core.ilm.action.StartILMAction;
 import org.elasticsearch.xpack.core.ilm.action.StopILMAction;
 import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction;
-import org.elasticsearch.xpack.core.security.action.GrantApiKeyAction;
+import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction;
 import org.elasticsearch.xpack.core.security.action.saml.SamlSpMetadataAction;
 import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction;
 import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java
index 9c50dcd111960..3e4a4c8ffdf3a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java
@@ -9,9 +9,9 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.transport.TransportRequest;
-import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest;
-import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest;
-import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest;
+import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest;
+import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest;
+import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest;
 import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyRequest;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationField;

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
index e5d38ae56637d..3e7fe85c132bd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
@@ -18,7 +18,7 @@
 import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction;
 import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
 import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction;
-import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction;
+import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction;
 import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesAction;
 import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileAction;
 import org.elasticsearch.xpack.core.security.action.profile.GetProfileAction;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/ApiKeyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java
similarity index 98%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/ApiKeyTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java
index 4bd4b59a657fa..88515ff046460 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/ApiKeyTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentHelper;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequestBuilderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilderTests.java
similarity index 98%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequestBuilderTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilderTests.java
index 16619d913bf39..481fc3e547e41 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequestBuilderTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilderTests.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.common.bytes.BytesArray;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java
similarity index 99%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequestTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java
index 83bf95ee2fb19..2ed6e31eaf435 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyResponseTests.java
similarity index 98%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyResponseTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyResponseTests.java
index 3477e98c41459..f52b1f59dd626 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyResponseTests.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java
similarity index 99%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java
index ecb937368f362..f904f3b24848a 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequest;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java
similarity index 98%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyResponseTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java
index 270add8dcc193..1486a1c7edcd7 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequestTests.java
similarity index 99%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequestTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequestTests.java
index 4ac16715bb0aa..82b162a21c46c 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequestTests.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequest;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyResponseTests.java
similarity index 98%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyResponseTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyResponseTests.java
index 09d9ad92ed1c4..b8979d2d734ae 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyResponseTests.java
@@ -5,7 +5,7 @@
  * 2.0.
  */

-package org.elasticsearch.xpack.core.security.action;
+package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.Strings;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java
index 088e7fa7f35a7..b35c0011c96e4 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java
@@ -9,8 +9,6 @@
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.test.AbstractWireSerializingTestCase;
-import org.elasticsearch.xpack.core.security.action.ApiKey;
-import org.elasticsearch.xpack.core.security.action.ApiKeyTests;

 import java.io.IOException;
 import java.time.Instant;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java
index 078cfd5bbf4f4..267461cbf73ce 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java
@@ -11,8 +11,8 @@
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.transport.TransportRequest;
-import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest;
-import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest;
+import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest;
+import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest;
 import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyAction;
 import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyRequest;
 import org.elasticsearch.xpack.core.security.authc.Authentication;

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java
index c1dd1e1f87637..a8c2881a39111 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java
@@ -144,12 +144,12 @@
 import org.elasticsearch.xpack.core.ml.notifications.NotificationsIndex;
 import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction;
 import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction;
-import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction;
-import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest;
 import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction;
-import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest;
-import org.elasticsearch.xpack.core.security.action.GrantApiKeyAction;
-import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction;
+import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction;
+import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest;
+import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest;
+import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction;
+import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction;
 import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyRequest;
 import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesAction;
 import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesRequest;

diff --git a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java
index 21edecb3e622c..b4c635c3600a2 100644
--- a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java
+++ b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java
@@ -24,8 +24,8 @@
 import org.elasticsearch.test.rest.yaml.ObjectPath;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentFactory;
-import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequestBuilder;
-import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse;
+import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilder;
+import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderDocument;
 import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex;

diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java
index b80b6efcac500..f0cd7a3d11fba 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java
@@ -24,9 +24,9 @@
 import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
 import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.xpack.core.XPackSettings;
-import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction;
-import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest;
-import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse;
+import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction;
+import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest;
+import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse;
 import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
 import org.junit.Before;

diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java
index db3964edac998..2ba3035e4993e 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -39,19 +39,19 @@ import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.action.ApiKey; -import org.elasticsearch.xpack.core.security.action.ApiKeyTests; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheAction; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheRequest; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheResponse; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequestBuilder; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.GetApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; +import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilder; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.user.PutUserAction; import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java index 9f831d057cf05..609a8d96d08f2 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java @@ -24,11 +24,11 @@ import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyRequest; +import 
org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyResponse; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java index 2b74f1a4bcde7..26098bead079c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java @@ -16,9 +16,9 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.service.CreateServiceAccountTokenAction; import org.elasticsearch.xpack.core.security.action.service.CreateServiceAccountTokenRequest; import org.elasticsearch.xpack.core.security.action.service.CreateServiceAccountTokenResponse; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 3cd8c42a3f641..78ec4b27f69d4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -92,11 +92,11 @@ import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.SecuritySettings; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; -import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyAction; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyAction; import 
org.elasticsearch.xpack.core.security.action.enrollment.KibanaEnrollmentAction; import org.elasticsearch.xpack.core.security.action.enrollment.NodeEnrollmentAction; @@ -167,11 +167,11 @@ import org.elasticsearch.xpack.core.ssl.action.TransportGetCertificateInfoAction; import org.elasticsearch.xpack.core.ssl.rest.RestGetCertificateInfoAction; import org.elasticsearch.xpack.security.action.TransportClearSecurityCacheAction; -import org.elasticsearch.xpack.security.action.TransportCreateApiKeyAction; import org.elasticsearch.xpack.security.action.TransportDelegatePkiAuthenticationAction; -import org.elasticsearch.xpack.security.action.TransportGetApiKeyAction; -import org.elasticsearch.xpack.security.action.TransportGrantApiKeyAction; -import org.elasticsearch.xpack.security.action.TransportInvalidateApiKeyAction; +import org.elasticsearch.xpack.security.action.apikey.TransportCreateApiKeyAction; +import org.elasticsearch.xpack.security.action.apikey.TransportGetApiKeyAction; +import org.elasticsearch.xpack.security.action.apikey.TransportGrantApiKeyAction; +import org.elasticsearch.xpack.security.action.apikey.TransportInvalidateApiKeyAction; import org.elasticsearch.xpack.security.action.apikey.TransportQueryApiKeyAction; import org.elasticsearch.xpack.security.action.enrollment.TransportKibanaEnrollmentAction; import org.elasticsearch.xpack.security.action.enrollment.TransportNodeEnrollmentAction; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportCreateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateApiKeyAction.java similarity index 90% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportCreateApiKeyAction.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateApiKeyAction.java index 8efea7110d407..99d454fdde561 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportCreateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateApiKeyAction.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.security.action; +package org.elasticsearch.xpack.security.action.apikey; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -15,9 +15,9 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.security.SecurityContext; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.security.authc.ApiKeyService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGetApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGetApiKeyAction.java similarity index 89% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGetApiKeyAction.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGetApiKeyAction.java index e47fcac23afc5..3ba8672f805b5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGetApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGetApiKeyAction.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.security.action; +package org.elasticsearch.xpack.security.action.apikey; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -15,9 +15,9 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.SecurityContext; -import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.GetApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.security.authc.ApiKeyService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java similarity index 87% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantApiKeyAction.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java index 6d8209ca90308..d0d7d74d432ef 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.security.action; +package org.elasticsearch.xpack.security.action.apikey; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -15,9 +15,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; +import org.elasticsearch.xpack.security.action.TransportGrantAction; import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authc.support.ApiKeyGenerator; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportInvalidateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportInvalidateApiKeyAction.java similarity index 89% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportInvalidateApiKeyAction.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportInvalidateApiKeyAction.java index 462cac914c827..ebcdef0912495 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportInvalidateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportInvalidateApiKeyAction.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.security.action; +package org.elasticsearch.xpack.security.action.apikey; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -15,9 +15,9 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.SecurityContext; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.security.authc.ApiKeyService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index 0d9d820f4bfa7..ba503fec4373c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -43,13 +43,13 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.Grant; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesAction; import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesRequest; import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesAction; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 0dad202d4ea4e..6690b46eac94a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -76,14 +76,14 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.ScrollHelper; -import org.elasticsearch.xpack.core.security.action.ApiKey; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheAction; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheRequest; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheResponse; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.GetApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGenerator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGenerator.java index 2a65435333c2f..8b157a14b6631 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGenerator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGenerator.java @@ -11,8 +11,8 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationContext; import org.elasticsearch.xpack.core.security.authc.Subject; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 4ee424e95a592..fc44d8efed683 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -41,8 +41,8 @@ import org.elasticsearch.xpack.core.eql.EqlAsyncActionNames; import org.elasticsearch.xpack.core.search.action.GetAsyncSearchAction; import org.elasticsearch.xpack.core.search.action.SubmitAsyncSearchAction; -import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; 
+import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesAction; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java index 35b5243680f24..53f0eacb88b06 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java @@ -24,8 +24,8 @@ import org.elasticsearch.http.HttpInfo; import org.elasticsearch.transport.TransportInfo; import org.elasticsearch.xpack.core.security.EnrollmentToken; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.enrollment.KibanaEnrollmentAction; import org.elasticsearch.xpack.core.security.action.enrollment.NodeEnrollmentAction; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java index 0e45832a1b78a..827148e038eb7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java @@ -13,8 +13,8 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequestBuilder; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilder; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java index 2d2ab19bcd7fe..ca04eb970ba8a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java @@ -17,9 +17,9 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; 
-import org.elasticsearch.xpack.core.security.action.GetApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java index 9f8d90937369e..ddf833a4ee87d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java @@ -21,10 +21,10 @@ import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequestBuilder; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilder; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java index 3071294a5db62..c045e083cb087 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java @@ -21,9 +21,9 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyResponse; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/TransportGrantApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java similarity index 96% rename from x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/TransportGrantApiKeyActionTests.java 
rename to x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java index fc4739483f2d6..443f75a87d9eb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/TransportGrantApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.security.action; +package org.elasticsearch.xpack.security.action.apikey; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ElasticsearchStatusException; @@ -18,10 +18,10 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.support.BearerToken; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index d4f76093f56f2..cc16043697a55 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -47,12 +47,12 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GrantApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesAction; import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesRequest; import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesAction; diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 0a4145f3c7455..c57fc3956f6ed 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -54,9 +54,9 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityContext; -import org.elasticsearch.xpack.core.security.action.ApiKeyTests; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.AuthenticationType; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java index c93c087b405d4..e96925281c8c9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java @@ -87,12 +87,12 @@ import org.elasticsearch.xpack.core.ml.action.UpdateProcessAction; import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.Role; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGeneratorTests.java index 7c6300485ff46..3204746c8960b 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGeneratorTests.java @@ -13,8 +13,8 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Subject; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 6ebac279c1d4d..6e942c0a4ac56 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -115,8 +115,8 @@ import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesAction; import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index e5d9e4bd53d11..c16dba79c0d23 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -32,8 +32,8 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; -import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequestBuilder; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java index 0a2d54c0fb644..0bcffce1f6cd6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java @@ -29,9 +29,9 @@ import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.EnrollmentToken; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java index 2f3e26e72850e..480fa13240d32 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityContext; -import org.elasticsearch.xpack.core.security.action.ApiKeyTests; +import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; import org.elasticsearch.xpack.core.security.action.service.TokenInfo; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.AuthenticationType; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java index 46b3b960a728c..b56db1eae23d8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java @@ -28,8 +28,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import java.time.Duration; import java.time.Instant; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index 4f95d0d9e6a47..417757cdaf71d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -28,10 +28,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.security.action.ApiKey; -import org.elasticsearch.xpack.core.security.action.ApiKeyTests; -import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.GetApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; +import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; import java.time.Instant; import java.time.temporal.ChronoUnit; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java index 249ba228777fc..e008a674b28fb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java @@ -28,8 +28,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyResponse; import java.util.Arrays; import java.util.Collections; From 0873893bb721b6a1a959d74d62c43699a1f4a5bf Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Thu, 27 Jan 2022 07:45:51 +0100 Subject: [PATCH 060/100] New GeoHexGrid aggregation (#82924) This commit introduces a new geogrid aggregation called GeoHexGridAggregation that is based on Uber's H3 grid. It only supports geo_point fields.
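To make the bucketing concrete: each geo_point is mapped to the H3 cell containing it at the requested precision, and that cell's H3Index becomes the bucket key. The sketch below is a minimal illustration, assuming the `org.elasticsearch.h3.H3` entry points `geoToH3(lat, lon, res)` and `h3ToString(long)` from the `:libs:elasticsearch-h3` dependency this patch wires in; check that library for the exact signatures.

[source,java]
--------------------------------------------------
import org.elasticsearch.h3.H3;

public class GeoHexBucketKeySketch {
    public static void main(String[] args) {
        // NEMO Science Museum, taken from the museums example in the docs below
        double lat = 52.374081;
        double lon = 4.912350;
        int precision = 4; // H3 resolution; valid values are 0..15

        // the numeric H3 cell id is what the aggregator buckets on internally
        long cell = H3.geoToH3(lat, lon, precision);
        // the H3Index string form is what responses render as the bucket "key"
        String key = H3.h3ToString(cell);

        // with the example data this is expected to print "841969dffffffff"
        System.out.println(key);
    }
}
--------------------------------------------------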
--- docs/changelog/82924.yaml | 5 + docs/reference/aggregations/bucket.asciidoc | 2 + .../bucket/geohexgrid-aggregation.asciidoc | 249 ++++++++++++++++++ .../bucket/geogrid/InternalGeoGridBucket.java | 2 +- .../bucket/geogrid/ParsedGeoGrid.java | 2 +- .../bucket/geogrid/GeoGridTestCase.java | 14 +- .../spatial/action/SpatialStatsAction.java | 3 +- x-pack/plugin/spatial/build.gradle | 1 + .../xpack/spatial/SpatialPlugin.java | 60 ++++- .../bucket/geogrid/GeoHexCellIdSource.java | 133 ++++++++++ .../geogrid/GeoHexGridAggregationBuilder.java | 123 +++++++++ .../bucket/geogrid/GeoHexGridAggregator.java | 62 +++++ .../geogrid/GeoHexGridAggregatorFactory.java | 81 ++++++ .../bucket/geogrid/InternalGeoHexGrid.java | 67 +++++ .../geogrid/InternalGeoHexGridBucket.java | 42 +++ .../bucket/geogrid/ParsedGeoHexGrid.java | 34 +++ .../geogrid/ParsedGeoHexGridBucket.java | 34 +++ .../xpack/spatial/SpatialPluginTests.java | 41 +++ .../GeoHexAggregationBuilderTests.java | 65 +++++ .../bucket/geogrid/GeoHexAggregatorTests.java | 87 ++++++ .../bucket/geogrid/GeoHexGridTests.java | 66 +++++ .../test/spatial/80_geohex_grid.yml | 143 ++++++++++ 22 files changed, 1308 insertions(+), 8 deletions(-) create mode 100644 docs/changelog/82924.yaml create mode 100644 docs/reference/aggregations/bucket/geohexgrid-aggregation.asciidoc create mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexCellIdSource.java create mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java create mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregator.java create mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregatorFactory.java create mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/InternalGeoHexGrid.java create mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/InternalGeoHexGridBucket.java create mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/ParsedGeoHexGrid.java create mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/ParsedGeoHexGridBucket.java create mode 100644 x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexAggregationBuilderTests.java create mode 100644 x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexAggregatorTests.java create mode 100644 x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridTests.java create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/80_geohex_grid.yml diff --git a/docs/changelog/82924.yaml b/docs/changelog/82924.yaml new file mode 100644 index 0000000000000..d53e2e6bf0ff0 --- /dev/null +++ b/docs/changelog/82924.yaml @@ -0,0 +1,5 @@ +pr: 82924 +summary: New `GeoHexGrid` aggregation +area: Geo +type: feature +issues: [] diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc index dfdaca18e6cfb..af0e338a4588f 100644 --- a/docs/reference/aggregations/bucket.asciidoc +++ 
b/docs/reference/aggregations/bucket.asciidoc @@ -40,6 +40,8 @@ include::bucket/geodistance-aggregation.asciidoc[] include::bucket/geohashgrid-aggregation.asciidoc[] +include::bucket/geohexgrid-aggregation.asciidoc[] + include::bucket/geotilegrid-aggregation.asciidoc[] include::bucket/global-aggregation.asciidoc[] diff --git a/docs/reference/aggregations/bucket/geohexgrid-aggregation.asciidoc b/docs/reference/aggregations/bucket/geohexgrid-aggregation.asciidoc new file mode 100644 index 0000000000000..528b321cbd90b --- /dev/null +++ b/docs/reference/aggregations/bucket/geohexgrid-aggregation.asciidoc @@ -0,0 +1,249 @@ +[role="xpack"] +[[search-aggregations-bucket-geohexgrid-aggregation]] +=== Geohex grid aggregation +++++ +Geohex grid +++++ + +A multi-bucket aggregation that groups <> +values into buckets that represent a grid. +The resulting grid can be sparse and only +contains cells that have matching data. Each cell corresponds to a +https://h3geo.org/docs/core-library/h3Indexing#h3-cell-indexp[H3 cell index] and is +labeled using the https://h3geo.org/docs/core-library/h3Indexing#h3index-representation[H3Index representation]. + +See https://h3geo.org/docs/core-library/restable[the table of cell areas for H3 +resolutions] on how precision (zoom) correlates to size on the ground. +Precision for this aggregation can be between 0 and 15, inclusive. + +WARNING: High-precision requests can be very expensive in terms of RAM and +result sizes. For example, the highest-precision geohex with a precision of 15 +produces cells that cover less than 10cm by 10cm. We recommend you use a +filter to limit high-precision requests to a smaller geographic area. For an example, +refer to <>. + +[[geohexgrid-low-precision]] +==== Simple low-precision request + +[source,console,id=geohexgrid-aggregation-example] +-------------------------------------------------- +PUT /museums +{ + "mappings": { + "properties": { + "location": { + "type": "geo_point" + } + } + } +} + +POST /museums/_bulk?refresh +{"index":{"_id":1}} +{"location": "52.374081,4.912350", "name": "NEMO Science Museum"} +{"index":{"_id":2}} +{"location": "52.369219,4.901618", "name": "Museum Het Rembrandthuis"} +{"index":{"_id":3}} +{"location": "52.371667,4.914722", "name": "Nederlands Scheepvaartmuseum"} +{"index":{"_id":4}} +{"location": "51.222900,4.405200", "name": "Letterenhuis"} +{"index":{"_id":5}} +{"location": "48.861111,2.336389", "name": "Musée du Louvre"} +{"index":{"_id":6}} +{"location": "48.860000,2.327000", "name": "Musée d'Orsay"} + +POST /museums/_search?size=0 +{ + "aggregations": { + "large-grid": { + "geohex_grid": { + "field": "location", + "precision": 4 + } + } + } +} +-------------------------------------------------- + +Response: + +[source,console-result] +-------------------------------------------------- +{ + ... + "aggregations": { + "large-grid": { + "buckets": [ + { + "key": "841969dffffffff", + "doc_count": 3 + }, + { + "key": "841fb47ffffffff", + "doc_count": 2 + }, + { + "key": "841fa4dffffffff", + "doc_count": 1 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] + +[[geohexgrid-high-precision]] +==== High-precision requests + +When requesting detailed buckets (typically for displaying a "zoomed in" map), +a filter like <> should be +applied to narrow the subject area. Otherwise, potentially millions of buckets +will be created and returned. 
+ +[source,console,id=geohexgrid-high-precision-ex] +-------------------------------------------------- +POST /museums/_search?size=0 +{ + "aggregations": { + "zoomed-in": { + "filter": { + "geo_bounding_box": { + "location": { + "top_left": "52.4, 4.9", + "bottom_right": "52.3, 5.0" + } + } + }, + "aggregations": { + "zoom1": { + "geohex_grid": { + "field": "location", + "precision": 12 + } + } + } + } + } +} +-------------------------------------------------- +// TEST[continued] + +Response: + +[source,console-result] +-------------------------------------------------- +{ + ... + "aggregations": { + "zoomed-in": { + "doc_count": 3, + "zoom1": { + "buckets": [ + { + "key": "8c1969c9b2617ff", + "doc_count": 1 + }, + { + "key": "8c1969526d753ff", + "doc_count": 1 + }, + { + "key": "8c1969526d26dff", + "doc_count": 1 + } + ] + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] + +[[geohexgrid-addtl-bounding-box-filtering]] +==== Requests with additional bounding box filtering + +The `geohex_grid` aggregation supports an optional `bounds` parameter +that restricts the cells considered to those that intersect the +provided bounds. The `bounds` parameter accepts the same +<> +as the geo-bounding box query. This bounding box can be used with or +without an additional `geo_bounding_box` query for filtering the points prior to aggregating. +It is an independent bounding box that can intersect with, be equal to, or be disjoint +to any additional `geo_bounding_box` queries defined in the context of the aggregation. + +[source,console,id=geohexgrid-aggregation-with-bounds] +-------------------------------------------------- +POST /museums/_search?size=0 +{ + "aggregations": { + "tiles-in-bounds": { + "geohex_grid": { + "field": "location", + "precision": 12, + "bounds": { + "top_left": "52.4, 4.9", + "bottom_right": "52.3, 5.0" + } + } + } + } +} +-------------------------------------------------- +// TEST[continued] + +Response: + +[source,console-result] +-------------------------------------------------- +{ + ... + "aggregations": { + "tiles-in-bounds": { + "buckets": [ + { + "key": "8c1969c9b2617ff", + "doc_count": 1 + }, + { + "key": "8c1969526d753ff", + "doc_count": 1 + }, + { + "key": "8c1969526d26dff", + "doc_count": 1 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] + +[[geohexgrid-options]] +==== Options + +[horizontal] +field:: +(Required, string) Field containing indexed geo-point values. Must be explicitly +mapped as a <> field. If the field contains an array, +`geohex_grid` aggregates all array values. + +precision:: +(Optional, integer) Integer zoom of the key used to define cells/buckets in +the results. Defaults to `6`. Values outside of [`0`,`15`] will be rejected. + +bounds:: +(Optional, object) Bounding box used to filter the geo-points in each bucket. +Accepts the same bounding box formats as the +<>. + +size:: +(Optional, integer) Maximum number of buckets to return. Defaults to 10,000. +When results are trimmed, buckets are prioritized based on the volume of +documents they contain. + +shard_size:: +(Optional, integer) Number of buckets returned from each shard. Defaults to +`max(10,(size x number-of-shards))` to allow for a more accurate count of the +top cells in the final result.
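For the `size` and `shard_size` defaults above, the composition is simple enough to spell out; `defaultShardSize` below is a hypothetical helper written for illustration only, not an API added by this patch.

[source,java]
--------------------------------------------------
// hypothetical helper mirroring the documented default:
// shard_size = max(10, size * number_of_shards)
static int defaultShardSize(int size, int numberOfShards) {
    return Math.max(10, size * numberOfShards);
}

// example: with the default size of 10,000 and 3 shards, each shard returns
// up to 30,000 of its highest-doc-count cells, and the coordinating node
// trims the merged result back down to `size` buckets.
--------------------------------------------------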
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java index 9c8ac145fca47..126528ef533fc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java @@ -51,7 +51,7 @@ public void writeTo(StreamOutput out) throws IOException { aggregations.writeTo(out); } - protected long hashAsLong() { + public long hashAsLong() { return hashAsLong; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java index 21a0249c485e2..c7a0f5b184a92 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java @@ -34,7 +34,7 @@ public static ObjectParser createParser( return parser; } - protected void setName(String name) { + public void setName(String name) { super.setName(name); } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java index 20bd74356f7e2..aaddf51eb1735 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java @@ -55,16 +55,22 @@ protected int maxNumberOfBuckets() { @Override protected T createTestInstance(String name, Map metadata, InternalAggregations aggregations) { final int precision = randomPrecision(); - int size = randomNumberOfBuckets(); - List buckets = new ArrayList<>(size); + final int size = randomNumberOfBuckets(); + final List buckets = new ArrayList<>(size); + final List seen = new ArrayList<>(size); + int finalSize = 0; for (int i = 0; i < size; i++) { double latitude = randomDoubleBetween(-90.0, 90.0, false); double longitude = randomDoubleBetween(-180.0, 180.0, false); long hashAsLong = longEncode(longitude, latitude, precision); - buckets.add(createInternalGeoGridBucket(hashAsLong, randomInt(IndexWriter.MAX_DOCS), aggregations)); + if (seen.contains(hashAsLong) == false) { // make sure we don't add the same bucket twice + buckets.add(createInternalGeoGridBucket(hashAsLong, randomInt(IndexWriter.MAX_DOCS), aggregations)); + seen.add(hashAsLong); + finalSize++; + } } - return createInternalGeoGrid(name, size, buckets, metadata); + return createInternalGeoGrid(name, finalSize, buckets, metadata); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/action/SpatialStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/action/SpatialStatsAction.java index 4e51f35838cad..cd934c2cd5982 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/action/SpatialStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/action/SpatialStatsAction.java @@ -39,7 +39,8 @@ private SpatialStatsAction() { * Items to track. Serialized by ordinals. Append only, don't remove or change order of items in this list.
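* For example, an Item written by an older node is decoded by its ordinal on the receiving side, so removing or reordering entries would make mixed-version clusters read the wrong item; new entries such as GEOHEX are therefore appended at the end.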
     */
    public enum Item {
-        GEOLINE
+        GEOLINE,
+        GEOHEX
    }

    public static class Request extends BaseNodesRequest<Request> implements ToXContentObject {
diff --git a/x-pack/plugin/spatial/build.gradle b/x-pack/plugin/spatial/build.gradle
index 5c10a7181de2a..7930141230015 100644
--- a/x-pack/plugin/spatial/build.gradle
+++ b/x-pack/plugin/spatial/build.gradle
@@ -14,6 +14,7 @@ dependencies {
   compileOnly project(path: ':modules:legacy-geo')
   compileOnly project(':modules:lang-painless:spi')
   compileOnly project(path: xpackModule('core'))
+  api project(":libs:elasticsearch-h3")
   testImplementation(testArtifact(project(xpackModule('core'))))
   testImplementation project(path: xpackModule('vector-tile'))
 }
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java
index 064e43e2b9e90..8c2c260987e65 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java
+++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java
@@ -31,6 +31,8 @@
 import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.ValueCountAggregator;
+import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
 import org.elasticsearch.xcontent.ContextParser;
 import org.elasticsearch.xpack.core.XPackPlugin;
@@ -50,9 +52,13 @@
 import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.BoundedGeoHashGridTiler;
 import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.BoundedGeoTileGridTiler;
 import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.GeoGridTiler;
+import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.GeoHexCellIdSource;
+import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.GeoHexGridAggregationBuilder;
+import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.GeoHexGridAggregator;
 import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.GeoShapeCellIdSource;
 import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.GeoShapeHashGridAggregator;
 import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.GeoShapeTileGridAggregator;
+import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.InternalGeoHexGrid;
 import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.UnboundedGeoHashGridTiler;
 import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.UnboundedGeoTileGridTiler;
 import org.elasticsearch.xpack.spatial.search.aggregations.metrics.GeoShapeBoundsAggregator;
@@ -87,6 +93,12 @@ public class SpatialPlugin extends Plugin implements ActionPlugin, MapperPlugin,
         License.OperationMode.GOLD
     );

+    private final LicensedFeature.Momentary GEO_HEX_AGG_FEATURE = LicensedFeature.momentary(
+        "spatial",
+        "geo-hex-agg",
+        License.OperationMode.GOLD
+    );
+
    // to be overridden by tests
    protected XPackLicenseState getLicenseState() {
        return XPackPlugin.getSharedLicenseState();
@@ -139,7 +151,12 @@ public List<AggregationSpec> getAggregations() {
                 GeoLineAggregationBuilder.NAME,
                 GeoLineAggregationBuilder::new,
                 usage.track(SpatialStatsAction.Item.GEOLINE,
checkLicense(GeoLineAggregationBuilder.PARSER, GEO_LINE_AGG_FEATURE)) - ).addResultReader(InternalGeoLine::new).setAggregatorRegistrar(GeoLineAggregationBuilder::registerUsage) + ).addResultReader(InternalGeoLine::new).setAggregatorRegistrar(GeoLineAggregationBuilder::registerUsage), + new AggregationSpec( + GeoHexGridAggregationBuilder.NAME, + GeoHexGridAggregationBuilder::new, + usage.track(SpatialStatsAction.Item.GEOHEX, checkLicense(GeoHexGridAggregationBuilder.PARSER, GEO_HEX_AGG_FEATURE)) + ).addResultReader(InternalGeoHexGrid::new).setAggregatorRegistrar(this::registerGeoHexGridAggregator) ); } @@ -171,6 +188,47 @@ private void registerGeoShapeCentroidAggregator(ValuesSourceRegistry.Builder bui ); } + private void registerGeoHexGridAggregator(ValuesSourceRegistry.Builder builder) { + builder.register( + GeoHexGridAggregationBuilder.REGISTRY_KEY, + CoreValuesSourceType.GEOPOINT, + ( + name, + factories, + valuesSource, + precision, + geoBoundingBox, + requiredSize, + shardSize, + aggregationContext, + parent, + cardinality, + metadata) -> { + if (GEO_HEX_AGG_FEATURE.check(getLicenseState())) { + GeoHexCellIdSource cellIdSource = new GeoHexCellIdSource( + (ValuesSource.GeoPoint) valuesSource, + precision, + geoBoundingBox + ); + return new GeoHexGridAggregator( + name, + factories, + cellIdSource, + requiredSize, + shardSize, + aggregationContext, + parent, + cardinality, + metadata + ); + } + + throw LicenseUtils.newComplianceException("geohex_grid aggregation on geo_point fields"); + }, + true + ); + } + private void registerGeoShapeGridAggregators(ValuesSourceRegistry.Builder builder) { builder.register( GeoHashGridAggregationBuilder.REGISTRY_KEY, diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexCellIdSource.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexCellIdSource.java new file mode 100644 index 0000000000000..be2589a007707 --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexCellIdSource.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedNumericDocValues; +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.h3.CellBoundary; +import org.elasticsearch.h3.H3; +import org.elasticsearch.index.fielddata.MultiGeoPointValues; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.search.aggregations.bucket.geogrid.CellValues; +import org.elasticsearch.search.aggregations.support.ValuesSource; + +/** + * Class to help convert {@link MultiGeoPointValues} + * to GeoHex bucketing. 
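+ * Each point is converted with {@link H3#geoToH3(double, double, int)} to the long-encoded
+ * H3 cell containing it at the configured precision; when a bounding box is set, only cells
+ * matching the box (checked per point first, with a slower per-cell-boundary fallback) are emitted.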
+ */ +public class GeoHexCellIdSource extends ValuesSource.Numeric { + private final GeoPoint valuesSource; + private final int precision; + private final GeoBoundingBox geoBoundingBox; + + public GeoHexCellIdSource(GeoPoint valuesSource, int precision, GeoBoundingBox geoBoundingBox) { + this.valuesSource = valuesSource; + this.precision = precision; + this.geoBoundingBox = geoBoundingBox; + } + + public int precision() { + return precision; + } + + @Override + public boolean isFloatingPoint() { + return false; + } + + @Override + public SortedNumericDocValues longValues(LeafReaderContext ctx) { + return geoBoundingBox.isUnbounded() + ? new UnboundedCellValues(valuesSource.geoPointValues(ctx), precision) + : new BoundedCellValues(valuesSource.geoPointValues(ctx), precision, geoBoundingBox); + } + + @Override + public SortedNumericDoubleValues doubleValues(LeafReaderContext ctx) { + throw new UnsupportedOperationException(); + } + + @Override + public SortedBinaryDocValues bytesValues(LeafReaderContext ctx) { + throw new UnsupportedOperationException(); + } + + private static class UnboundedCellValues extends CellValues { + + UnboundedCellValues(MultiGeoPointValues geoValues, int precision) { + super(geoValues, precision); + } + + @Override + protected int advanceValue(org.elasticsearch.common.geo.GeoPoint target, int valuesIdx) { + values[valuesIdx] = H3.geoToH3(target.getLat(), target.getLon(), precision); + return valuesIdx + 1; + } + } + + private static class BoundedCellValues extends CellValues { + + private final boolean crossesDateline; + private final GeoBoundingBox bbox; + + protected BoundedCellValues(MultiGeoPointValues geoValues, int precision, GeoBoundingBox bbox) { + super(geoValues, precision); + this.crossesDateline = bbox.right() < bbox.left(); + this.bbox = bbox; + } + + @Override + public int advanceValue(org.elasticsearch.common.geo.GeoPoint target, int valuesIdx) { + final double lat = target.getLat(); + final double lon = target.getLon(); + final long hex = H3.geoToH3(lat, lon, precision); + // validPoint is a fast check, validHex is slow + if (validPoint(lat, lon) || validHex(hex)) { + values[valuesIdx] = hex; + return valuesIdx + 1; + } + return valuesIdx; + } + + private boolean validPoint(double lat, double lon) { + if (bbox.top() >= lat && bbox.bottom() <= lat) { + if (crossesDateline) { + return bbox.left() <= lon || bbox.right() >= lon; + } else { + return bbox.left() <= lon && bbox.right() >= lon; + } + } + return false; + } + + private boolean validHex(long hex) { + CellBoundary boundary = H3.h3ToGeoBoundary(hex); + double minLat = Double.POSITIVE_INFINITY; + double minLon = Double.POSITIVE_INFINITY; + double maxLat = Double.NEGATIVE_INFINITY; + double maxLon = Double.NEGATIVE_INFINITY; + for (int i = 0; i < boundary.numPoints(); i++) { + double boundaryLat = boundary.getLatLon(i).getLatDeg(); + double boundaryLon = boundary.getLatLon(i).getLonDeg(); + minLon = Math.min(minLon, boundaryLon); + maxLon = Math.max(maxLon, boundaryLon); + minLat = Math.min(minLat, boundaryLat); + maxLat = Math.max(maxLat, boundaryLat); + } + if (bbox.top() > minLat && bbox.bottom() < maxLat) { + if (crossesDateline) { + return bbox.left() < maxLon || bbox.right() > minLon; + } else { + return bbox.left() < maxLon && bbox.right() > minLon; + } + } + return false; + } + } +} diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java 
b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java new file mode 100644 index 0000000000000..6f9d1a2f509cf --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.h3.H3; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.GeoGridAggregatorSupplier; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; + +public class GeoHexGridAggregationBuilder extends GeoGridAggregationBuilder { + public static final String NAME = "geohex_grid"; + private static final int DEFAULT_PRECISION = 5; + private static final int DEFAULT_MAX_NUM_CELLS = 10000; + public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>( + NAME, + GeoGridAggregatorSupplier.class + ); + + public static final ObjectParser PARSER = createParser( + NAME, + GeoHexGridAggregationBuilder::parsePrecision, + GeoHexGridAggregationBuilder::new + ); + + static int parsePrecision(XContentParser parser) throws IOException, ElasticsearchParseException { + final Object node = parser.currentToken().equals(XContentParser.Token.VALUE_NUMBER) + ? Integer.valueOf(parser.intValue()) + : parser.text(); + return XContentMapValues.nodeIntegerValue(node); + } + + public GeoHexGridAggregationBuilder(String name) { + super(name); + precision(DEFAULT_PRECISION); + size(DEFAULT_MAX_NUM_CELLS); + shardSize = -1; + } + + public GeoHexGridAggregationBuilder(StreamInput in) throws IOException { + super(in); + } + + @Override + public GeoGridAggregationBuilder precision(int precision) { + if (precision < 0 || precision > H3.MAX_H3_RES) { + throw new IllegalArgumentException( + "Invalid geohex aggregation precision of " + precision + "" + ". 
Must be between 0 and " + H3.MAX_H3_RES + ); + } + this.precision = precision; + return this; + } + + @Override + protected ValuesSourceAggregatorFactory createFactory( + String name, + ValuesSourceConfig config, + int precision, + int requiredSize, + int shardSize, + GeoBoundingBox geoBoundingBox, + AggregationContext context, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metadata + ) throws IOException { + return new GeoHexGridAggregatorFactory( + name, + config, + precision, + requiredSize, + shardSize, + geoBoundingBox, + context, + parent, + subFactoriesBuilder, + metadata + ); + } + + private GeoHexGridAggregationBuilder( + GeoHexGridAggregationBuilder clone, + AggregatorFactories.Builder factoriesBuilder, + Map metadata + ) { + super(clone, factoriesBuilder, metadata); + } + + @Override + protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map metadata) { + return new GeoHexGridAggregationBuilder(this, factoriesBuilder, metadata); + } + + @Override + public String getType() { + return NAME; + } + + @Override + protected ValuesSourceRegistry.RegistryKey getRegistryKey() { + return REGISTRY_KEY; + } +} diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregator.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregator.java new file mode 100644 index 0000000000000..5f5239d1624a5 --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregator.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.CardinalityUpperBound; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregator; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoGridBucket; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.ValuesSource; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Aggregates data expressed as h3 longs (for efficiency's sake) + * but formats results as h3 strings. 
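+ * The string form is only produced when a bucket key is rendered, via
+ * {@link InternalGeoHexGridBucket#getKeyAsString()} calling {@link H3#h3ToString(long)}.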
+ */ +public class GeoHexGridAggregator extends GeoGridAggregator { + + public GeoHexGridAggregator( + String name, + AggregatorFactories factories, + ValuesSource.Numeric valuesSource, + int requiredSize, + int shardSize, + AggregationContext context, + Aggregator parent, + CardinalityUpperBound cardinality, + Map metadata + ) throws IOException { + super(name, factories, valuesSource, requiredSize, shardSize, context, parent, cardinality, metadata); + } + + @Override + protected InternalGeoHexGrid buildAggregation( + String name, + int requiredSize, + List buckets, + Map metadata + ) { + return new InternalGeoHexGrid(name, requiredSize, buckets, metadata); + } + + @Override + public InternalGeoHexGrid buildEmptyAggregation() { + return new InternalGeoHexGrid(name, requiredSize, Collections.emptyList(), metadata()); + } + + @Override + protected InternalGeoGridBucket newEmptyBucket() { + return new InternalGeoHexGridBucket(0, 0, null); + } +} diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregatorFactory.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregatorFactory.java new file mode 100644 index 0000000000000..4870948bc7e93 --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregatorFactory.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.CardinalityUpperBound; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.NonCollectingAggregator; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +public class GeoHexGridAggregatorFactory extends ValuesSourceAggregatorFactory { + + private final int precision; + private final int requiredSize; + private final int shardSize; + private final GeoBoundingBox geoBoundingBox; + + GeoHexGridAggregatorFactory( + String name, + ValuesSourceConfig config, + int precision, + int requiredSize, + int shardSize, + GeoBoundingBox geoBoundingBox, + AggregationContext context, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metadata + ) throws IOException { + super(name, config, context, parent, subFactoriesBuilder, metadata); + this.precision = precision; + this.requiredSize = requiredSize; + this.shardSize = shardSize; + this.geoBoundingBox = geoBoundingBox; + } + + @Override + protected Aggregator createUnmapped(Aggregator parent, Map metadata) throws IOException { + final InternalAggregation aggregation = new InternalGeoHexGrid(name, requiredSize, Collections.emptyList(), 
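+            // unmapped fields aggregate to an empty geohex grid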
metadata); + return new NonCollectingAggregator(name, context, parent, factories, metadata) { + @Override + public InternalAggregation buildEmptyAggregation() { + return aggregation; + } + }; + } + + @Override + protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) + throws IOException { + return context.getValuesSourceRegistry() + .getAggregator(GeoHexGridAggregationBuilder.REGISTRY_KEY, config) + .build( + name, + factories, + config.getValuesSource(), + precision, + geoBoundingBox, + requiredSize, + shardSize, + context, + parent, + cardinality, + metadata + ); + } +} diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/InternalGeoHexGrid.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/InternalGeoHexGrid.java new file mode 100644 index 0000000000000..07c5dc35c3e72 --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/InternalGeoHexGrid.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoGridBucket; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * Represents a grid of cells where each cell's location is determined by a h3 cell. + * All cells in a grid are of the same precision and held internally as a single long + * for efficiency's sake. 
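+ * During reduce, buckets from different shards are merged on this long value; the H3
+ * string address is only produced when the bucket key is rendered.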
+ */ +public class InternalGeoHexGrid extends InternalGeoGrid { + + InternalGeoHexGrid(String name, int requiredSize, List buckets, Map metadata) { + super(name, requiredSize, buckets, metadata); + } + + public InternalGeoHexGrid(StreamInput in) throws IOException { + super(in); + } + + @Override + public InternalGeoGrid create(List buckets) { + return new InternalGeoHexGrid(name, requiredSize, buckets, metadata); + } + + @Override + public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + return new InternalGeoHexGridBucket(prototype.hashAsLong(), prototype.getDocCount(), aggregations); + } + + @Override + protected InternalGeoGrid create( + String name, + int requiredSize, + List buckets, + Map metadata + ) { + return new InternalGeoHexGrid(name, requiredSize, buckets, metadata); + } + + @Override + protected InternalGeoHexGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + return new InternalGeoHexGridBucket(hashAsLong, docCount, aggregations); + } + + @Override + protected Reader getBucketReader() { + return InternalGeoHexGridBucket::new; + } + + @Override + public String getWriteableName() { + return GeoHexGridAggregationBuilder.NAME; + } +} diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/InternalGeoHexGridBucket.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/InternalGeoHexGridBucket.java new file mode 100644 index 0000000000000..f98b8bfe47627 --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/InternalGeoHexGridBucket.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.h3.H3; +import org.elasticsearch.h3.LatLng; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoGridBucket; + +import java.io.IOException; + +public class InternalGeoHexGridBucket extends InternalGeoGridBucket { + + InternalGeoHexGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + super(hashAsLong, docCount, aggregations); + } + + /** + * Read from a stream. 
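+     * Used to deserialize buckets received from other nodes during reduce.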
+ */ + public InternalGeoHexGridBucket(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getKeyAsString() { + return H3.h3ToString(hashAsLong); + } + + @Override + public GeoPoint getKey() { + LatLng latLng = H3.h3ToLatLng(hashAsLong); + return new GeoPoint(latLng.getLatDeg(), latLng.getLonDeg()); + } +} diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/ParsedGeoHexGrid.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/ParsedGeoHexGrid.java new file mode 100644 index 0000000000000..ae8c878391405 --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/ParsedGeoHexGrid.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoGrid; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +public class ParsedGeoHexGrid extends ParsedGeoGrid { + + private static final ObjectParser PARSER = createParser( + ParsedGeoHexGrid::new, + ParsedGeoHexGridBucket::fromXContent, + ParsedGeoHexGridBucket::fromXContent + ); + + public static ParsedGeoGrid fromXContent(XContentParser parser, String name) throws IOException { + ParsedGeoGrid aggregation = PARSER.parse(parser, null); + aggregation.setName(name); + return aggregation; + } + + @Override + public String getType() { + return GeoHexGridAggregationBuilder.NAME; + } +} diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/ParsedGeoHexGridBucket.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/ParsedGeoHexGridBucket.java new file mode 100644 index 0000000000000..1383e46dcd9e5 --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/ParsedGeoHexGridBucket.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.h3.H3; +import org.elasticsearch.h3.LatLng; +import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoGridBucket; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +public class ParsedGeoHexGridBucket extends ParsedGeoGridBucket { + + @Override + public GeoPoint getKey() { + LatLng latLng = H3.h3ToLatLng(hashAsString); + return new GeoPoint(latLng.getLatDeg(), latLng.getLonDeg()); + } + + @Override + public String getKeyAsString() { + return hashAsString; + } + + static ParsedGeoHexGridBucket fromXContent(XContentParser parser) throws IOException { + return parseXContent(parser, false, ParsedGeoHexGridBucket::new, (p, bucket) -> bucket.hashAsString = p.text()); + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java index 2ea9a4205ba5a..8ca7afd4b69d3 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java @@ -10,15 +10,19 @@ import org.elasticsearch.license.License; import org.elasticsearch.license.TestUtils; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.CardinalityUpperBound; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.GeoGridAggregatorSupplier; import org.elasticsearch.search.aggregations.metrics.MetricAggregatorSupplier; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.GeoHexGridAggregationBuilder; import org.elasticsearch.xpack.spatial.search.aggregations.support.GeoShapeValuesSourceType; import java.util.Arrays; @@ -54,6 +58,43 @@ public void testGeoCentroidLicenseCheck() { } } + public void testGeoHexLicenseCheck() { + for (License.OperationMode operationMode : License.OperationMode.values()) { + SpatialPlugin plugin = getPluginWithOperationMode(operationMode); + ValuesSourceRegistry.Builder registryBuilder = new ValuesSourceRegistry.Builder(); + List specs = plugin.getAggregations(); + specs.forEach(c -> c.getAggregatorRegistrar().accept(registryBuilder)); + ValuesSourceRegistry registry = registryBuilder.build(); + GeoGridAggregatorSupplier hexSupplier = registry.getAggregator( + GeoHexGridAggregationBuilder.REGISTRY_KEY, + new ValuesSourceConfig(CoreValuesSourceType.GEOPOINT, null, true, null, null, null, null, null, null) + ); + if (License.OperationMode.TRIAL != operationMode + && License.OperationMode.compare(operationMode, License.OperationMode.GOLD) < 0) { + ElasticsearchSecurityException exception = expectThrows( + ElasticsearchSecurityException.class, + () -> 
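+                    // below GOLD (and not TRIAL), building the aggregator must throw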
hexSupplier.build( + null, + AggregatorFactories.EMPTY, + null, + 0, + null, + 0, + 0, + null, + null, + CardinalityUpperBound.NONE, + null + ) + ); + assertThat( + exception.getMessage(), + equalTo("current license is non-compliant for [geohex_grid aggregation on geo_point fields]") + ); + } + } + } + public void testGeoGridLicenseCheck() { for (ValuesSourceRegistry.RegistryKey registryKey : Arrays.asList( GeoHashGridAggregationBuilder.REGISTRY_KEY, diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexAggregationBuilderTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexAggregationBuilderTests.java new file mode 100644 index 0000000000000..dbe960087d91d --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexAggregationBuilderTests.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.h3.H3; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.spatial.util.GeoTestUtils; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class GeoHexAggregationBuilderTests extends AbstractSerializingTestCase { + + @Override + protected GeoHexGridAggregationBuilder doParseInstance(XContentParser parser) throws IOException { + assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME)); + String name = parser.currentName(); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME)); + assertThat(parser.currentName(), equalTo(GeoHexGridAggregationBuilder.NAME)); + GeoHexGridAggregationBuilder parsed = GeoHexGridAggregationBuilder.PARSER.apply(parser, name); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT)); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT)); + return parsed; + } + + @Override + protected Writeable.Reader instanceReader() { + return GeoHexGridAggregationBuilder::new; + } + + @Override + protected GeoHexGridAggregationBuilder createTestInstance() { + GeoHexGridAggregationBuilder geoHexGridAggregationBuilder = new GeoHexGridAggregationBuilder("_name"); + geoHexGridAggregationBuilder.field("field"); + if (randomBoolean()) { + geoHexGridAggregationBuilder.precision(randomIntBetween(0, H3.MAX_H3_RES)); + } + if (randomBoolean()) { + geoHexGridAggregationBuilder.size(randomIntBetween(0, 256 * 256)); + } + if (randomBoolean()) { + geoHexGridAggregationBuilder.shardSize(randomIntBetween(0, 256 * 256)); + } + if (randomBoolean()) { + geoHexGridAggregationBuilder.setGeoBoundingBox(GeoTestUtils.randomBBox()); + } + return geoHexGridAggregationBuilder; + } + + public void testInvalidPrecision() { + GeoHexGridAggregationBuilder geoHexGridAggregationBuilder = new GeoHexGridAggregationBuilder("_name"); + expectThrows(IllegalArgumentException.class, () -> 
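+            // 16 is one above H3.MAX_H3_RES (15), so it is out of range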
geoHexGridAggregationBuilder.precision(16)); + expectThrows(IllegalArgumentException.class, () -> geoHexGridAggregationBuilder.precision(-1)); + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexAggregatorTests.java new file mode 100644 index 0000000000000..18ec429a0c3a1 --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexAggregatorTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.Rectangle; +import org.elasticsearch.h3.CellBoundary; +import org.elasticsearch.h3.H3; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregatorTestCase; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; +import org.elasticsearch.xpack.spatial.search.aggregations.support.GeoShapeValuesSourceType; +import org.elasticsearch.xpack.spatial.util.GeoTestUtils; + +import java.util.List; + +public class GeoHexAggregatorTests extends GeoGridAggregatorTestCase { + + @Override + protected List getSearchPlugins() { + return List.of(new LocalStateSpatialPlugin()); + } + + @Override + protected List getSupportedValuesSourceTypes() { + return List.of(GeoShapeValuesSourceType.instance(), CoreValuesSourceType.GEOPOINT); + } + + @Override + protected int randomPrecision() { + return randomIntBetween(0, H3.MAX_H3_RES); + } + + @Override + protected String hashAsString(double lng, double lat, int precision) { + return H3.geoToH3Address(lat, lng, precision); + } + + @Override + protected GeoGridAggregationBuilder createBuilder(String name) { + return new GeoHexGridAggregationBuilder(name); + } + + @Override + protected Point randomPoint() { + return GeometryTestUtils.randomPoint(); + } + + @Override + protected GeoBoundingBox randomBBox() { + return GeoTestUtils.randomBBox(); + } + + @Override + protected Rectangle getTile(double lng, double lat, int precision) { + CellBoundary boundary = H3.h3ToGeoBoundary(hashAsString(lng, lat, precision)); + double minLat = Double.POSITIVE_INFINITY; + double minLon = Double.POSITIVE_INFINITY; + double maxLat = Double.NEGATIVE_INFINITY; + double maxLon = Double.NEGATIVE_INFINITY; + for (int i = 0; i < boundary.numPoints(); i++) { + double boundaryLat = boundary.getLatLon(i).getLatDeg(); + double boundaryLon = boundary.getLatLon(i).getLonDeg(); + minLon = Math.min(minLon, boundaryLon); + maxLon = Math.max(maxLon, boundaryLon); + minLat = Math.min(minLat, boundaryLat); + maxLat = Math.max(maxLat, 
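+            // accumulate the lat/lon extent of the H3 cell's boundary vertices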
boundaryLat); + } + return new Rectangle(minLon, maxLon, maxLat, minLat); + } + + @Override + protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) { + return createBuilder("foo").field(fieldName); + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridTests.java new file mode 100644 index 0000000000000..421e014452024 --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.h3.H3; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridTestCase; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoGridBucket; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; + +import java.util.List; +import java.util.Map; + +public class GeoHexGridTests extends GeoGridTestCase { + + @Override + protected SearchPlugin registerPlugin() { + return new LocalStateSpatialPlugin(); + } + + @Override + protected List getNamedXContents() { + return CollectionUtils.appendToCopy( + super.getNamedXContents(), + new NamedXContentRegistry.Entry( + Aggregation.class, + new ParseField(GeoHexGridAggregationBuilder.NAME), + (p, c) -> ParsedGeoHexGrid.fromXContent(p, (String) c) + ) + ); + } + + @Override + protected InternalGeoHexGrid createInternalGeoGrid( + String name, + int size, + List buckets, + Map metadata + ) { + return new InternalGeoHexGrid(name, size, buckets, metadata); + } + + @Override + protected InternalGeoHexGridBucket createInternalGeoGridBucket(Long key, long docCount, InternalAggregations aggregations) { + return new InternalGeoHexGridBucket(key, docCount, aggregations); + } + + @Override + protected long longEncode(double lng, double lat, int precision) { + return H3.geoToH3(lat, lng, precision); + } + + @Override + protected int randomPrecision() { + return randomIntBetween(0, H3.MAX_H3_RES); + } +} diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/80_geohex_grid.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/80_geohex_grid.yml new file mode 100644 index 0000000000000..a578e80ad4ddd --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/spatial/80_geohex_grid.yml @@ -0,0 +1,143 @@ +setup: + - do: + indices.create: + index: locations + body: + settings: + number_of_shards: 3 + mappings: + properties: + location: + type: geo_point + + - do: + bulk: + refresh: true + body: + - index: + _index: locations + _id: 1 + - '{"location": "POINT(4.912350 52.374081)", "city": "Amsterdam", "name": "NEMO Science Museum"}' + - index: + _index: locations + 
_id: 2 + - '{"location": "POINT(4.901618 52.369219)", "city": "Amsterdam", "name": "Museum Het Rembrandthuis"}' + - index: + _index: locations + _id: 3 + - '{"location": "POINT(4.914722 52.371667)", "city": "Amsterdam", "name": "Nederlands Scheepvaartmuseum"}' + - index: + _index: locations + _id: 4 + - '{"location": "POINT(4.405200 51.222900)", "city": "Antwerp", "name": "Letterenhuis"}' + - index: + _index: locations + _id: 5 + - '{"location": "POINT(2.336389 48.861111)", "city": "Paris", "name": "Musée du Louvre"}' + - index: + _index: locations + _id: 6 + - '{"location": "POINT(2.327000 48.860000)", "city": "Paris", "name": "Musée dOrsay"}' + - do: + indices.refresh: {} + +--- +"Test geohex_grid with defaults": + + - do: + search: + index: locations + size: 0 + body: + aggs: + grid: + geohex_grid: + field: location + - match: {hits.total.value: 6 } + - length: { aggregations.grid.buckets: 3 } + - match: { aggregations.grid.buckets.0.key: "85196953fffffff" } + - match: { aggregations.grid.buckets.0.doc_count: 3 } + - match: { aggregations.grid.buckets.1.key: "851fb467fffffff" } + - match: { aggregations.grid.buckets.1.doc_count: 2 } + - match: { aggregations.grid.buckets.2.key: "851fa4c7fffffff" } + - match: { aggregations.grid.buckets.2.doc_count: 1 } + +--- +"Test geohex_grid with precision": + + - do: + search: + index: locations + size: 0 + body: + aggs: + grid: + geohex_grid: + field: location + precision: 0 + - match: { hits.total.value: 6 } + - length: { aggregations.grid.buckets: 2 } + - match: { aggregations.grid.buckets.0.key: "801ffffffffffff" } + - match: { aggregations.grid.buckets.0.doc_count: 4 } + - match: { aggregations.grid.buckets.1.key: "8019fffffffffff" } + - match: { aggregations.grid.buckets.1.doc_count: 2 } + +--- +"Test geohex_grid with size": + + - do: + search: + index: locations + size: 0 + body: + aggs: + grid: + geohex_grid: + field: location + size: 1 + - match: {hits.total.value: 6 } + - length: { aggregations.grid.buckets: 1 } + - match: { aggregations.grid.buckets.0.key: "85196953fffffff" } + - match: { aggregations.grid.buckets.0.doc_count: 3 } + +--- +"Test geohex_grid with shard size": + + - do: + search: + index: locations + size: 0 + body: + aggs: + grid: + geohex_grid: + field: location + shard_size: 10 + - match: {hits.total.value: 6 } + - length: { aggregations.grid.buckets: 3 } + - match: { aggregations.grid.buckets.0.key: "85196953fffffff" } + - match: { aggregations.grid.buckets.0.doc_count: 3 } + - match: { aggregations.grid.buckets.1.key: "851fb467fffffff" } + - match: { aggregations.grid.buckets.1.doc_count: 2 } + - match: { aggregations.grid.buckets.2.key: "851fa4c7fffffff" } + - match: { aggregations.grid.buckets.2.doc_count: 1 } + +--- +"Test geohex_grid with bounds": + + - do: + search: + index: locations + size: 0 + body: + aggs: + grid: + geohex_grid: + field: location + bounds: + top_left: "52.4, 4.9" + bottom_right: "52.3, 5.0" + - match: {hits.total.value: 6 } + - length: { aggregations.grid.buckets: 1 } + - match: { aggregations.grid.buckets.0.key: "85196953fffffff" } + - match: { aggregations.grid.buckets.0.doc_count: 3 } From 7ac00605b8a60a8a327819e9e6274c8649e39c21 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 27 Jan 2022 08:49:45 +0100 Subject: [PATCH 061/100] Speed up Searchable Snapshot Allocator (#83186) This one still shows up a lot in profiling even when not using searchable snapshots just because these settings are so costly to look up. 
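In essence, the pattern is as follows (a minimal sketch with hypothetical stand-in types,
not the actual Elasticsearch classes; the real change caches the flags on IndexMetadata):

    import java.util.Map;

    final class MetadataSketch {
        private final boolean isSearchableSnapshot; // derived once at construction

        MetadataSketch(Map<String, String> settings) {
            // previously every caller re-derived this from the settings on each check;
            // "snapshot" is the store type used by searchable snapshot indices
            this.isSearchableSnapshot = "snapshot".equals(settings.get("index.store.type"));
        }

        boolean isSearchableSnapshot() {
            return isSearchableSnapshot; // O(1) field read on the hot allocation path
        }
    }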
=> fixed the standard way by moving the setting up to index metadata --- .../cluster/metadata/IndexMetadata.java | 29 +++++++++++++++++-- .../metadata/IndexMetadataVerifier.java | 6 ++-- .../metadata/MetadataCreateIndexService.java | 7 ++--- .../index/shard/StoreRecovery.java | 3 +- .../recovery/PeerRecoveryTargetService.java | 3 +- .../repositories/RepositoriesService.java | 8 ++--- .../blobstore/BlobStoreRepository.java | 3 +- .../blobstore/FileRestoreContext.java | 3 +- .../ccr/action/AutoFollowCoordinator.java | 7 ++--- .../ccr/action/TransportPutFollowAction.java | 3 +- .../action/TransportResumeFollowAction.java | 5 ++-- .../elasticsearch/xpack/core/XPackPlugin.java | 3 +- .../xpack/core/ilm/MigrateAction.java | 9 +++--- .../xpack/frozen/FrozenIndices.java | 4 +-- .../action/TransportFreezeIndexAction.java | 3 +- ...ableSnapshotsBlobStoreCacheIntegTests.java | 3 +- .../SearchableSnapshots.java | 8 ++--- .../SearchableSnapshotsUsageTracker.java | 3 +- ...actTransportSearchableSnapshotsAction.java | 5 +--- ...archableSnapshotsUsageTransportAction.java | 5 ++-- ...ransportMountSearchableSnapshotAction.java | 3 +- .../SearchableSnapshotAllocator.java | 4 +-- .../SearchableSnapshotIndexEventListener.java | 5 ++-- ...eSnapshotIndexFoldersDeletionListener.java | 5 ++-- .../DedicatedFrozenNodeAllocationDecider.java | 5 +--- .../HasFrozenCacheAllocationDecider.java | 6 +--- .../SearchableSnapshotAllocationDecider.java | 3 +- ...chableSnapshotEnableAllocationDecider.java | 3 +- ...shotRepositoryExistsAllocationDecider.java | 5 ++-- .../BlobStoreCacheMaintenanceService.java | 13 ++++----- ...archableSnapshotIndexMetadataUpgrader.java | 8 ++--- ...bleSnapshotIndexMetadataUpgraderTests.java | 7 ++--- 32 files changed, 82 insertions(+), 105 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index ff8da8ddd0df2..77f3c2cceb747 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -44,6 +44,7 @@ import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -73,6 +74,7 @@ import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.validateIpValue; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; +import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_PARTIAL_SETTING_KEY; public class IndexMetadata implements Diffable, ToXContentFragment { @@ -505,6 +507,10 @@ public static APIBlock readFrom(StreamInput input) throws IOException { private final AutoExpandReplicas autoExpandReplicas; + private final boolean isSearchableSnapshot; + + private final boolean isPartialSearchableSnapshot; + private IndexMetadata( final Index index, final long version, @@ -539,7 +545,9 @@ private IndexMetadata( @Nullable final List tierPreference, final int shardsPerNodeLimit, final LifecycleExecutionState lifecycleExecutionState, - final AutoExpandReplicas autoExpandReplicas + final AutoExpandReplicas autoExpandReplicas, + final boolean 
isSearchableSnapshot, + final boolean isPartialSearchableSnapshot ) { this.index = index; this.version = version; @@ -582,6 +590,8 @@ private IndexMetadata( this.shardsPerNodeLimit = shardsPerNodeLimit; this.lifecycleExecutionState = lifecycleExecutionState; this.autoExpandReplicas = autoExpandReplicas; + this.isSearchableSnapshot = isSearchableSnapshot; + this.isPartialSearchableSnapshot = isPartialSearchableSnapshot; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -623,7 +633,9 @@ IndexMetadata withMappingMetadata(MappingMetadata mapping) { this.tierPreference, this.shardsPerNodeLimit, this.lifecycleExecutionState, - this.autoExpandReplicas + this.autoExpandReplicas, + this.isSearchableSnapshot, + this.isPartialSearchableSnapshot ); } @@ -755,6 +767,14 @@ public AutoExpandReplicas getAutoExpandReplicas() { return autoExpandReplicas; } + public boolean isSearchableSnapshot() { + return isSearchableSnapshot; + } + + public boolean isPartialSearchableSnapshot() { + return isPartialSearchableSnapshot; + } + /** * Return the concrete mapping for this index or {@code null} if this index has no mappings at all. */ @@ -1588,6 +1608,7 @@ public IndexMetadata build() { lifecycleExecutionState = LifecycleExecutionState.EMPTY_STATE; } + final boolean isSearchableSnapshot = SearchableSnapshotsSettings.isSearchableSnapshotStore(settings); return new IndexMetadata( new Index(index, uuid), version, @@ -1622,7 +1643,9 @@ public IndexMetadata build() { tierPreference, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(settings), lifecycleExecutionState, - AutoExpandReplicas.SETTING.get(settings) + AutoExpandReplicas.SETTING.get(settings), + isSearchableSnapshot, + isSearchableSnapshot && settings.getAsBoolean(SEARCHABLE_SNAPSHOT_PARTIAL_SETTING_KEY, false) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java index 57911b4204a74..f20bb98c1dcc7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java @@ -36,8 +36,6 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isPartialSearchableSnapshotIndex; - /** * This service is responsible for verifying index metadata when an index is introduced * to the cluster, for example when restarting nodes, importing dangling indices, or restoring @@ -240,9 +238,9 @@ IndexMetadata archiveBrokenIndexSettings(IndexMetadata indexMetadata) { * _tier_preference: data_frozen, removing any pre-existing tier allocation rules. 
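     * (The shared_cache check below now reads the precomputed IndexMetadata#isPartialSearchableSnapshot
     * flag instead of re-parsing the index settings.)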
*/ IndexMetadata convertSharedCacheTierPreference(IndexMetadata indexMetadata) { - final Settings settings = indexMetadata.getSettings(); // Only remove these settings for a shared_cache searchable snapshot - if (isPartialSearchableSnapshotIndex(settings)) { + if (indexMetadata.isPartialSearchableSnapshot()) { + final Settings settings = indexMetadata.getSettings(); final Settings.Builder settingsBuilder = Settings.builder().put(settings); // Clear any allocation rules other than preference for tier settingsBuilder.remove("index.routing.allocation.include._tier"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 03caa7ae588b0..a6c9cb3db42da 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -105,7 +105,6 @@ import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.resolveSettings; import static org.elasticsearch.index.IndexModule.INDEX_RECOVERY_TYPE_SETTING; import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; /** * Service responsible for submitting create index requests @@ -1307,7 +1306,7 @@ private static List validateIndexCustomPath(Settings settings, @Nullable */ static List validateShrinkIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings); - if (isSearchableSnapshotStore(sourceMetadata.getSettings())) { + if (sourceMetadata.isSearchableSnapshot()) { throw new IllegalArgumentException("can't shrink searchable snapshot index [" + sourceIndex + ']'); } assert INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings); @@ -1340,7 +1339,7 @@ static List validateShrinkIndex(ClusterState state, String sourceIndex, static void validateSplitIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings); - if (isSearchableSnapshotStore(sourceMetadata.getSettings())) { + if (sourceMetadata.isSearchableSnapshot()) { throw new IllegalArgumentException("can't split searchable snapshot index [" + sourceIndex + ']'); } IndexMetadata.selectSplitShard(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); @@ -1348,7 +1347,7 @@ static void validateSplitIndex(ClusterState state, String sourceIndex, String ta static void validateCloneIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings); - if (isSearchableSnapshotStore(sourceMetadata.getSettings())) { + if (sourceMetadata.isSearchableSnapshot()) { for (Setting nonCloneableSetting : Arrays.asList(INDEX_STORE_TYPE_SETTING, INDEX_RECOVERY_TYPE_SETTING)) { if (nonCloneableSetting.exists(targetIndexSettings) == false) { throw new IllegalArgumentException( diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 4010e256a076b..3f13ad6997bef 100644 --- 
a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -54,7 +54,6 @@ import static org.elasticsearch.common.lucene.Lucene.indexWriterConfigWithNoMerging; import static org.elasticsearch.core.TimeValue.timeValueMillis; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; /** * This package private utility class encapsulates the logic to recover an index shard from either an existing index on @@ -552,7 +551,7 @@ private void restore( } public static void bootstrap(final IndexShard indexShard, final Store store) throws IOException { - if (isSearchableSnapshotStore(indexShard.indexSettings().getSettings()) == false) { + if (indexShard.indexSettings.getIndexMetadata().isSearchableSnapshot() == false) { // not bootstrapping new history for searchable snapshots (which are read-only) allows sequence-number based peer recoveries store.bootstrapNewHistory(); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 9403661e1ce1f..76141af94c53f 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -66,7 +66,6 @@ import static org.elasticsearch.core.TimeValue.timeValueMillis; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; /** * The recovery target handles recoveries of peer shards of the shard+node to recover to. @@ -228,7 +227,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node"; logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); - if (isSearchableSnapshotStore(indexShard.indexSettings().getSettings())) { + if (indexShard.indexSettings().getIndexMetadata().isSearchableSnapshot()) { // for searchable snapshots, peer recovery is treated similarly to recovery from snapshot indexShard.getIndexEventListener().afterFilesRestoredFromRepository(indexShard); final Store store = indexShard.store(); diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 2e1a2c23c6dc7..6d1bcd0a131cd 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -63,7 +63,6 @@ import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOTS_REPOSITORY_NAME_SETTING_KEY; import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOTS_REPOSITORY_UUID_SETTING_KEY; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; /** * Service responsible for maintaining and providing access to snapshot repositories on nodes. 
@@ -734,7 +733,7 @@ private static void ensureNoSearchableSnapshotsIndicesInUse(ClusterState cluster long count = 0L; List indices = null; for (IndexMetadata indexMetadata : clusterState.metadata()) { - if (indexSettingsMatchRepositoryMetadata(indexMetadata.getSettings(), repositoryMetadata)) { + if (indexSettingsMatchRepositoryMetadata(indexMetadata, repositoryMetadata)) { if (indices == null) { indices = new ArrayList<>(); } @@ -756,8 +755,9 @@ private static void ensureNoSearchableSnapshotsIndicesInUse(ClusterState cluster } } - private static boolean indexSettingsMatchRepositoryMetadata(Settings indexSettings, RepositoryMetadata repositoryMetadata) { - if (isSearchableSnapshotStore(indexSettings)) { + private static boolean indexSettingsMatchRepositoryMetadata(IndexMetadata indexMetadata, RepositoryMetadata repositoryMetadata) { + if (indexMetadata.isSearchableSnapshot()) { + final Settings indexSettings = indexMetadata.getSettings(); final String indexRepositoryUuid = indexSettings.get(SEARCHABLE_SNAPSHOTS_REPOSITORY_UUID_SETTING_KEY); if (Strings.hasLength(indexRepositoryUuid)) { return Objects.equals(repositoryMetadata.uuid(), indexRepositoryUuid); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index e5e8b8e554fed..28e1897a0272d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -150,7 +150,6 @@ import java.util.stream.Stream; import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; /** * BlobStore - based implementation of Snapshot Repository @@ -2679,7 +2678,7 @@ public void snapshotShard(SnapshotShardContext context) { long indexTotalFileSize = 0; final BlockingQueue filesToSnapshot = new LinkedBlockingQueue<>(); - if (isSearchableSnapshotStore(store.indexSettings().getSettings())) { + if (store.indexSettings().getIndexMetadata().isSearchableSnapshot()) { indexCommitPointFiles = Collections.emptyList(); } else if (filesFromSegmentInfos == null) { // If we did not find a set of files that is equal to the current commit we determine the files to upload by comparing files diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java index 276b9e7d93aeb..ec373aecfa088 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java @@ -31,7 +31,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; /** * This context will execute a file restore of the lucene files. 
It is primarily designed to be used to @@ -69,7 +68,7 @@ public void restore(SnapshotFiles snapshotFiles, Store store, ActionListener filesToRecover = new ArrayList<>(); - if (isSearchableSnapshotStore(store.indexSettings().getSettings())) { + if (store.indexSettings().getIndexMetadata().isSearchableSnapshot()) { for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : snapshotFiles.indexFiles()) { assert store.directory().fileLength(fileInfo.physicalName()) == fileInfo.length(); recoveryState.getIndex().addFileDetail(fileInfo.physicalName(), fileInfo.length(), true); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 0ee1da85eb990..29a267f6fd78a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -37,7 +37,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; @@ -600,8 +599,8 @@ private void checkAutoFollowPattern( ) ); } else { - final Settings leaderIndexSettings = remoteMetadata.getIndexSafe(indexToFollow).getSettings(); - if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(leaderIndexSettings) == false) { + final IndexMetadata leaderIndexMetadata = remoteMetadata.getIndexSafe(indexToFollow); + if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(leaderIndexMetadata.getSettings()) == false) { String message = String.format( Locale.ROOT, "index [%s] cannot be followed, because soft deletes are not enabled", @@ -615,7 +614,7 @@ private void checkAutoFollowPattern( } groupedListener.onResponse(new Tuple<>(indexToFollow, failure)); }); - } else if (SearchableSnapshotsSettings.isSearchableSnapshotStore(leaderIndexSettings)) { + } else if (leaderIndexMetadata.isSearchableSnapshot()) { String message = String.format( Locale.ROOT, "index to follow [%s] is a searchable snapshot index and cannot be used for cross-cluster replication purpose", diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 51553b7c42789..fc0c47dc42e92 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -35,7 +35,6 @@ import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -135,7 +134,7 @@ private void createFollowerIndex( ); return; } - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(leaderIndexMetadata.getSettings())) { + if (leaderIndexMetadata.isSearchableSnapshot()) { listener.onFailure( new IllegalArgumentException( "leader index [" diff --git 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 58e0defba048f..f1367a29b31fc 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -43,7 +43,6 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.persistent.PersistentTasksService; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -246,7 +245,7 @@ static void validate( "leader index [" + leaderIndex.getIndex().getName() + "] does not have soft deletes enabled" ); } - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(leaderIndex.getSettings())) { + if (leaderIndex.isSearchableSnapshot()) { throw new IllegalArgumentException( "leader index [" + leaderIndex.getIndex().getName() @@ -256,7 +255,7 @@ static void validate( if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(followIndex.getSettings()) == false) { throw new IllegalArgumentException("follower index [" + request.getFollowerIndex() + "] does not have soft deletes enabled"); } - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(followIndex.getSettings())) { + if (followIndex.isSearchableSnapshot()) { throw new IllegalArgumentException( "follower index [" + request.getFollowerIndex() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 638abb71eb501..c43ab23b39d0f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -59,7 +59,6 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.snapshots.sourceonly.SourceOnlySnapshotRepository; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; @@ -437,7 +436,7 @@ public Map getRepositories( @Override public Optional getEngineFactory(IndexSettings indexSettings) { if (indexSettings.getValue(SourceOnlySnapshotRepository.SOURCE_ONLY) - && SearchableSnapshotsSettings.isSearchableSnapshotStore(indexSettings.getSettings()) == false) { + && indexSettings.getIndexMetadata().isSearchableSnapshot() == false) { return Optional.of(SourceOnlySnapshotRepository.getEngineFactory()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MigrateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MigrateAction.java index 5504be993293e..deb7f91cb41ea 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MigrateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MigrateAction.java @@ -9,12 +9,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; import 
org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; @@ -106,11 +105,11 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { migrationKey, nextStepKey, (index, clusterState) -> { - Settings indexSettings = clusterState.metadata().index(index).getSettings(); + IndexMetadata indexMetadata = clusterState.metadata().index(index); // partially mounted indices will already have data_frozen, and we don't want to change that if they do - if (SearchableSnapshotsSettings.isPartialSearchableSnapshotIndex(indexSettings)) { - String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings); + if (indexMetadata.isPartialSearchableSnapshot()) { + String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexMetadata.getSettings()); logger.debug( "[{}] action in policy [{}] is configured for index [{}] which is a partially mounted index. " + "skipping this action", diff --git a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndices.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndices.java index 40a8ba102f4f3..cf727d93702bb 100644 --- a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndices.java +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndices.java @@ -36,13 +36,11 @@ import java.util.Optional; import java.util.function.Supplier; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; - public class FrozenIndices extends Plugin implements ActionPlugin, EnginePlugin { @Override public Optional getEngineFactory(IndexSettings indexSettings) { - if (indexSettings.getValue(FrozenEngine.INDEX_FROZEN) && isSearchableSnapshotStore(indexSettings.getSettings()) == false) { + if (indexSettings.getValue(FrozenEngine.INDEX_FROZEN) && indexSettings.getIndexMetadata().isSearchableSnapshot() == false) { return Optional.of(config -> new FrozenEngine(config, true, false)); } else { return Optional.empty(); diff --git a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java index 4862736883b62..2b702044ce92a 100644 --- a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.engine.frozen.FrozenEngine; import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; import org.elasticsearch.protocol.xpack.frozen.FreezeResponse; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -194,7 +193,7 @@ public ClusterState execute(ClusterState currentState) { } else { settingsBuilder.remove(FrozenEngine.INDEX_FROZEN.getKey()); 
settingsBuilder.remove(IndexSettings.INDEX_SEARCH_THROTTLED.getKey()); - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(indexMetadata.getSettings()) == false) { + if (indexMetadata.isSearchableSnapshot() == false) { settingsBuilder.remove("index.blocks.write"); blocks.removeIndexBlock(index.getName(), IndexMetadata.INDEX_WRITE_BLOCK); } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java index 07aac8fe2bb8b..8cf3e142aeb47 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java @@ -36,7 +36,6 @@ import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.xcontent.XContentBuilder; @@ -406,7 +405,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing @Override public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { final IndexMetadata indexMetadata = allocation.metadata().index(shardRouting.index()); - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(indexMetadata.getSettings()) == false) { + if (indexMetadata.isSearchableSnapshot() == false) { return allocation.decision(Decision.YES, name, "index is not a searchable snapshot shard - can allocate"); } if (allocation.metadata().hasIndex(SNAPSHOT_BLOB_CACHE_INDEX) == false) { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index 88af9c1ee5c8f..28cef6972562a 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -127,8 +127,6 @@ import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isPartialSearchableSnapshotIndex; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.ClientHelper.SEARCHABLE_SNAPSHOTS_ORIGIN; import static org.elasticsearch.xpack.core.searchablesnapshots.SearchableSnapshotsConstants.SEARCHABLE_SNAPSHOT_FEATURE; @@ -376,7 +374,7 @@ public Collection createComponents( @Override public void onIndexModule(IndexModule indexModule) { - if (isSearchableSnapshotStore(indexModule.getSettings())) { + if 
(indexModule.indexSettings().getIndexMetadata().isSearchableSnapshot()) { indexModule.addIndexEventListener( new SearchableSnapshotIndexEventListener(settings, cacheService.get(), frozenCacheService.get()) ); @@ -449,9 +447,9 @@ public Map getDirectoryFactories() { @Override public Optional getEngineFactory(IndexSettings indexSettings) { - if (isSearchableSnapshotStore(indexSettings.getSettings())) { + if (indexSettings.getIndexMetadata().isSearchableSnapshot()) { final Boolean frozen = indexSettings.getSettings().getAsBoolean("index.frozen", null); - final boolean useFrozenEngine = isPartialSearchableSnapshotIndex(indexSettings.getSettings()) + final boolean useFrozenEngine = indexSettings.getIndexMetadata().isPartialSearchableSnapshot() && (frozen == null || frozen.equals(Boolean.TRUE)); if (useFrozenEngine) { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUsageTracker.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUsageTracker.java index 054afb0519cff..3e2ce639481ca 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUsageTracker.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUsageTracker.java @@ -10,7 +10,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import java.util.function.Supplier; @@ -35,7 +34,7 @@ public void run() { private static boolean hasSearchableSnapshotsIndices(ClusterState state) { for (IndexMetadata indexMetadata : state.metadata()) { - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(indexMetadata.getSettings())) { + if (indexMetadata.isSearchableSnapshot()) { return true; } } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/AbstractTransportSearchableSnapshotsAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/AbstractTransportSearchableSnapshotsAction.java index 119c55bd66777..e7ba26367aca8 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/AbstractTransportSearchableSnapshotsAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/AbstractTransportSearchableSnapshotsAction.java @@ -21,11 +21,9 @@ import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots; @@ -95,8 +93,7 @@ protected ShardsIterator shards(ClusterState state, Request request, String[] co for (String concreteIndex : concreteIndices) { IndexMetadata indexMetaData = state.metadata().index(concreteIndex); if (indexMetaData != null) { - Settings 
indexSettings = indexMetaData.getSettings(); - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(indexSettings)) { + if (indexMetaData.isSearchableSnapshot()) { searchableSnapshotIndices.add(concreteIndex); } } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsUsageTransportAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsUsageTransportAction.java index abc602bc91da6..0c09a0e31091d 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsUsageTransportAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsUsageTransportAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.protocol.xpack.XPackUsageRequest; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -61,8 +60,8 @@ protected void masterOperation( int numFullCopySnapIndices = 0; int numSharedCacheSnapIndices = 0; for (IndexMetadata indexMetadata : state.metadata()) { - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(indexMetadata.getSettings())) { - if (SearchableSnapshotsSettings.SNAPSHOT_PARTIAL_SETTING.get(indexMetadata.getSettings())) { + if (indexMetadata.isSearchableSnapshot()) { + if (indexMetadata.isPartialSearchableSnapshot()) { numSharedCacheSnapIndices++; } else { numFullCopySnapIndices++; diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java index ab2ff7d4ea5e1..1b8bf8c991efc 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java @@ -59,7 +59,6 @@ import static org.elasticsearch.index.IndexModule.INDEX_RECOVERY_TYPE_SETTING; import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; /** * Action that mounts a snapshot as a searchable snapshot, by converting the mount request into a restore request with specific settings @@ -199,7 +198,7 @@ protected void masterOperation( final SnapshotId snapshotId = matchingSnapshotId.get(); final IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData(repoData, snapshotId, indexId); - if (isSearchableSnapshotStore(indexMetadata.getSettings())) { + if (indexMetadata.isSearchableSnapshot()) { throw new IllegalArgumentException( String.format( Locale.ROOT, diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java 
b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java index 9d1c75752f9b6..2d99fef2845a3 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java @@ -44,7 +44,6 @@ import org.elasticsearch.gateway.ReplicaShardAllocator; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.xpack.searchablesnapshots.action.cache.TransportSearchableSnapshotCacheStoresAction; @@ -109,8 +108,7 @@ public SearchableSnapshotAllocator(Client client, RerouteService rerouteService, public void beforeAllocation(RoutingAllocation allocation) { boolean hasPartialIndices = false; for (IndexMetadata indexMetadata : allocation.metadata()) { - final Settings indexSettings = indexMetadata.getSettings(); - if (SearchableSnapshotsSettings.isPartialSearchableSnapshotIndex(indexSettings)) { + if (indexMetadata.isPartialSearchableSnapshot()) { hasPartialIndices = true; break; } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexEventListener.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexEventListener.java index 9186d2e5d3e5a..704daebc6c359 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexEventListener.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexEventListener.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.searchablesnapshots.cache.shared.FrozenCacheService; import org.elasticsearch.xpack.searchablesnapshots.store.SearchableSnapshotDirectory; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_INDEX_NAME_SETTING; import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING; import static org.elasticsearch.xpack.searchablesnapshots.store.SearchableSnapshotDirectory.unwrapDirectory; @@ -89,8 +88,8 @@ private static void ensureSnapshotIsLoaded(IndexShard indexShard) { @Override public void beforeIndexRemoved(IndexService indexService, IndexRemovalReason reason) { if (shouldEvictCacheFiles(reason)) { - final IndexSettings indexSettings = indexService.getIndexSettings(); - if (isSearchableSnapshotStore(indexSettings.getSettings())) { + if (indexService.getMetadata().isSearchableSnapshot()) { + final IndexSettings indexSettings = indexService.getIndexSettings(); for (IndexShard indexShard : indexService) { final ShardId shardId = indexShard.shardId(); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexFoldersDeletionListener.java 
b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexFoldersDeletionListener.java index 3805e965c1b07..328ea6dda8e67 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexFoldersDeletionListener.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexFoldersDeletionListener.java @@ -20,7 +20,6 @@ import java.util.Objects; import java.util.function.Supplier; -import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_INDEX_NAME_SETTING; import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING; @@ -46,7 +45,7 @@ public SearchableSnapshotIndexFoldersDeletionListener( @Override public void beforeIndexFoldersDeleted(Index index, IndexSettings indexSettings, Path[] indexPaths) { - if (isSearchableSnapshotStore(indexSettings.getSettings())) { + if (indexSettings.getIndexMetadata().isSearchableSnapshot()) { for (int shard = 0; shard < indexSettings.getNumberOfShards(); shard++) { markShardAsEvictedInCache(new ShardId(index, shard), indexSettings); } @@ -55,7 +54,7 @@ public void beforeIndexFoldersDeleted(Index index, IndexSettings indexSettings, @Override public void beforeShardFoldersDeleted(ShardId shardId, IndexSettings indexSettings, Path[] shardPaths) { - if (isSearchableSnapshotStore(indexSettings.getSettings())) { + if (indexSettings.getIndexMetadata().isSearchableSnapshot()) { markShardAsEvictedInCache(shardId, indexSettings); } } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/DedicatedFrozenNodeAllocationDecider.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/DedicatedFrozenNodeAllocationDecider.java index 0c02f130cf480..2cc1309423721 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/DedicatedFrozenNodeAllocationDecider.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/DedicatedFrozenNodeAllocationDecider.java @@ -15,8 +15,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import static org.elasticsearch.cluster.node.DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE; @@ -81,8 +79,7 @@ private Decision canAllocateToNode(IndexMetadata indexMetadata, DiscoveryNode di return YES_NOT_DEDICATED_FROZEN_NODE; } - final Settings indexSettings = indexMetadata.getSettings(); - if (SearchableSnapshotsSettings.isPartialSearchableSnapshotIndex(indexSettings)) { + if (indexMetadata.isPartialSearchableSnapshot()) { return YES_IS_PARTIAL_SEARCHABLE_SNAPSHOT; } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/HasFrozenCacheAllocationDecider.java 
b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/HasFrozenCacheAllocationDecider.java index a559a5978200d..666b33512f351 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/HasFrozenCacheAllocationDecider.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/HasFrozenCacheAllocationDecider.java @@ -14,8 +14,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.xpack.searchablesnapshots.cache.shared.FrozenCacheInfoService; import static org.elasticsearch.xpack.searchablesnapshots.cache.shared.FrozenCacheService.SHARED_CACHE_SIZE_SETTING; @@ -77,9 +75,7 @@ public Decision shouldAutoExpandToNode(IndexMetadata indexMetadata, DiscoveryNod } private Decision canAllocateToNode(IndexMetadata indexMetadata, DiscoveryNode discoveryNode) { - final Settings indexSettings = indexMetadata.getSettings(); - - if (SearchableSnapshotsSettings.isPartialSearchableSnapshotIndex(indexSettings) == false) { + if (indexMetadata.isPartialSearchableSnapshot() == false) { return Decision.ALWAYS; } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotAllocationDecider.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotAllocationDecider.java index 66c10768630a4..05310090624e9 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotAllocationDecider.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotAllocationDecider.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import java.util.function.BooleanSupplier; @@ -48,7 +47,7 @@ public Decision canForceAllocatePrimary(ShardRouting shardRouting, RoutingNode n } private Decision allowAllocation(IndexMetadata indexMetadata, RoutingAllocation allocation) { - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(indexMetadata.getSettings())) { + if (indexMetadata.isSearchableSnapshot()) { if (hasValidLicenseSupplier.getAsBoolean()) { return allocation.decision(Decision.YES, NAME, "valid license for searchable snapshots"); } else { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java index c2a02acb3b8ba..2686f1d91e377 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java +++ 
b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; public class SearchableSnapshotEnableAllocationDecider extends AllocationDecider { @@ -64,7 +63,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing @Override public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { final IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(indexMetadata.getSettings())) { + if (indexMetadata.isSearchableSnapshot()) { EnableAllocationDecider.Allocation enableAllocationCopy = this.enableAllocation; boolean allocateOnRollingRestartCopy = this.allocateOnRollingRestart; if (enableAllocationCopy == EnableAllocationDecider.Allocation.PRIMARIES) { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotRepositoryExistsAllocationDecider.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotRepositoryExistsAllocationDecider.java index 466e0e631d910..171a6cc9d62b9 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotRepositoryExistsAllocationDecider.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotRepositoryExistsAllocationDecider.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import java.util.List; @@ -56,8 +55,8 @@ public Decision canAllocate(IndexMetadata indexMetadata, RoutingNode node, Routi } private static Decision allowAllocation(IndexMetadata indexMetadata, RoutingAllocation allocation) { - final Settings settings = indexMetadata.getSettings(); - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(settings)) { + if (indexMetadata.isSearchableSnapshot()) { + final Settings settings = indexMetadata.getSettings(); final RepositoriesMetadata repositoriesMetadata = allocation.metadata().custom(RepositoriesMetadata.TYPE); if (repositoriesMetadata == null || repositoriesMetadata.repositories().isEmpty()) { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java index 594f3becb8820..b382ca598c1be 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java @@ -61,7 +61,6 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import 
org.elasticsearch.search.fetch.subphase.FieldAndFormat; import org.elasticsearch.search.sort.ShardDocSortField; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; @@ -256,8 +255,8 @@ private ShardRouting systemIndexPrimaryShard(final ClusterState state) { private static boolean hasSearchableSnapshotWith(final ClusterState state, final String snapshotId, final String indexId) { for (IndexMetadata indexMetadata : state.metadata()) { - final Settings indexSettings = indexMetadata.getSettings(); - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(indexSettings)) { + if (indexMetadata.isSearchableSnapshot()) { + final Settings indexSettings = indexMetadata.getSettings(); if (Objects.equals(snapshotId, SNAPSHOT_SNAPSHOT_ID_SETTING.get(indexSettings)) && Objects.equals(indexId, SNAPSHOT_INDEX_ID_SETTING.get(indexSettings))) { return true; @@ -270,8 +269,8 @@ private static boolean hasSearchableSnapshotWith(final ClusterState state, final private static Map> listSearchableSnapshots(final ClusterState state) { Map> snapshots = null; for (IndexMetadata indexMetadata : state.metadata()) { - final Settings indexSettings = indexMetadata.getSettings(); - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(indexSettings)) { + if (indexMetadata.isSearchableSnapshot()) { + final Settings indexSettings = indexMetadata.getSettings(); if (snapshots == null) { snapshots = new HashMap<>(); } @@ -312,10 +311,10 @@ protected void doRun() { assert indexMetadata != null || state.metadata().indexGraveyard().containsIndex(deletedIndex) : "no previous metadata found for " + deletedIndex; if (indexMetadata != null) { - final Settings indexSetting = indexMetadata.getSettings(); - if (SearchableSnapshotsSettings.isSearchableSnapshotStore(indexSetting)) { + if (indexMetadata.isSearchableSnapshot()) { assert state.metadata().hasIndex(deletedIndex) == false; + final Settings indexSetting = indexMetadata.getSettings(); final String snapshotId = SNAPSHOT_SNAPSHOT_ID_SETTING.get(indexSetting); final String indexId = SNAPSHOT_INDEX_ID_SETTING.get(indexSetting); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgrader.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgrader.java index 884cac10b46c4..a512c732678bd 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgrader.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgrader.java @@ -20,7 +20,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.ShardLimitValidator; -import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.Executor; @@ -94,8 +93,8 @@ public void onFailure(Exception e) { static boolean needsUpgrade(ClusterState state) { return StreamSupport.stream(state.metadata().spliterator(), false) .filter(imd -> imd.getCreationVersion().onOrAfter(Version.V_7_12_0) && imd.getCreationVersion().before(Version.V_8_0_0)) + .filter(IndexMetadata::isPartialSearchableSnapshot) .map(IndexMetadata::getSettings) - 
.filter(SearchableSnapshotsSettings::isPartialSearchableSnapshotIndex)
             .anyMatch(SearchableSnapshotIndexMetadataUpgrader::notFrozenShardLimitGroup);
     }
 
@@ -106,10 +105,7 @@ static ClusterState upgradeIndices(ClusterState currentState) {
         Metadata.Builder builder = Metadata.builder(currentState.metadata());
         StreamSupport.stream(currentState.metadata().spliterator(), false)
             .filter(imd -> imd.getCreationVersion().onOrAfter(Version.V_7_12_0) && imd.getCreationVersion().before(Version.V_8_0_0))
-            .filter(
-                imd -> SearchableSnapshotsSettings.isPartialSearchableSnapshotIndex(imd.getSettings())
-                    && notFrozenShardLimitGroup(imd.getSettings())
-            )
+            .filter(imd -> imd.isPartialSearchableSnapshot() && notFrozenShardLimitGroup(imd.getSettings()))
             .map(SearchableSnapshotIndexMetadataUpgrader::setShardLimitGroupFrozen)
             .forEach(imd -> builder.put(imd, true));
         return ClusterState.builder(currentState).metadata(builder).build();
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgraderTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgraderTests.java
index 81ab54ade5f37..6571f46cb1fd3 100644
--- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgraderTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgraderTests.java
@@ -74,13 +74,13 @@ public void testUpgradeIndices() {
         assertTrue(StreamSupport.stream(upgradedState.metadata().spliterator(), false).anyMatch(upgraded -> {
             IndexMetadata original = originalState.metadata().index(upgraded.getIndex());
             assertThat(original, notNullValue());
-            if (isPartial(upgraded) == false
+            if (upgraded.isPartialSearchableSnapshot() == false
                 || ShardLimitValidator.INDEX_SETTING_SHARD_LIMIT_GROUP.get(original.getSettings())
                     .equals(ShardLimitValidator.FROZEN_GROUP)) {
                 assertThat(upgraded, sameInstance(original));
                 return false;
             } else {
-                assertThat(isPartial(upgraded), is(isPartial(original)));
+                assertThat(upgraded.isPartialSearchableSnapshot(), is(original.isPartialSearchableSnapshot()));
                 assertThat(upgraded.getNumberOfShards(), equalTo(original.getNumberOfShards()));
                 assertThat(upgraded.getNumberOfReplicas(), equalTo(original.getNumberOfReplicas()));
                 assertThat(
@@ -189,7 +189,4 @@ private ClusterState clusterState(Metadata.Builder metadataBuilder) {
         return ClusterState.builder(ClusterName.DEFAULT).metadata(metadataBuilder).build();
     }
 
-    private boolean isPartial(IndexMetadata upgraded) {
-        return SearchableSnapshotsSettings.isPartialSearchableSnapshotIndex(upgraded.getSettings());
-    }
 }

From 5707b656821d53b380de21adc7039f04c60e8802 Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Thu, 27 Jan 2022 09:47:27 +0100
Subject: [PATCH 062/100] Copy old mappings to _meta section (#83041)

For archival indices, where mappings might not be parseable by new ES
versions anymore, copy the mapping to the `_meta/legacy_mappings`
section.
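
For illustration (a hypothetical example, not part of the change itself:
a 5.x index with a single mapping type `doc` and one `long` field `val`,
the same shape the updated OldRepositoryAccessIT asserts on), the
restored index then reports a mapping via the get mapping API roughly
like:

    {
      "_meta": {
        "legacy_mappings": {
          "doc": {
            "properties": {
              "val": { "type": "long" }
            }
          }
        }
      }
    }

The old mapping stays available for inspection under `_meta`, while the
current version no longer needs to be able to parse it.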
Relates #81210

---
 .../cluster/metadata/IndexMetadata.java       | 42 ++++++++++++--
 .../cluster/metadata/MappingMetadata.java     | 10 ++++
 .../snapshots/RestoreService.java             | 33 +++++++++++
 .../oldrepos/OldRepositoryAccessIT.java       | 55 +++++++++++++++++--
 4 files changed, 130 insertions(+), 10 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
index 77f3c2cceb747..efef1d121bc25 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
@@ -1932,8 +1932,19 @@ public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOE
                     }
                     builder.settings(settings);
                 } else if ("mappings".equals(currentFieldName)) {
-                    // don't try to parse these for now
-                    parser.skipChildren();
+                    MapBuilder<String, Object> mappingSourceBuilder = MapBuilder.newMapBuilder();
+                    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                        if (token == XContentParser.Token.FIELD_NAME) {
+                            currentFieldName = parser.currentName();
+                        } else if (token == XContentParser.Token.START_OBJECT) {
+                            String mappingType = currentFieldName;
+                            mappingSourceBuilder.put(mappingType, parser.mapOrdered());
+                        } else {
+                            throw new IllegalArgumentException("Unexpected token: " + token);
+                        }
+                    }
+                    Map<String, Object> mapping = mappingSourceBuilder.map();
+                    handleLegacyMapping(builder, mapping);
                 } else if ("in_sync_allocations".equals(currentFieldName)) {
                     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                         if (token == XContentParser.Token.FIELD_NAME) {
@@ -1957,8 +1968,18 @@ public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOE
                 }
             } else if (token == XContentParser.Token.START_ARRAY) {
                 if ("mappings".equals(currentFieldName)) {
-                    // don't try to parse these for now
-                    parser.skipChildren();
+                    MapBuilder<String, Object> mappingSourceBuilder = MapBuilder.newMapBuilder();
+                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+                        Map<String, Object> mapping;
+                        if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
+                            CompressedXContent compressedXContent = new CompressedXContent(parser.binaryValue());
+                            mapping = XContentHelper.convertToMap(compressedXContent.compressedReference(), true).v2();
+                        } else {
+                            mapping = parser.mapOrdered();
+                        }
+                        mappingSourceBuilder.putAll(mapping);
+                    }
+                    handleLegacyMapping(builder, mappingSourceBuilder.map());
                 } else {
                     parser.skipChildren();
                 }
@@ -1982,12 +2003,23 @@ public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOE
         }
         XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);
 
-        builder.putMapping(MappingMetadata.EMPTY_MAPPINGS); // just make sure it's not empty so that _source can be read
+        if (builder.mapping() == null) {
+            builder.putMapping(MappingMetadata.EMPTY_MAPPINGS); // just make sure it's not empty so that _source can be read
+        }
 
         IndexMetadata indexMetadata = builder.build();
         assert indexMetadata.getCreationVersion().before(Version.CURRENT.minimumIndexCompatibilityVersion());
         return indexMetadata;
     }
+
+    private static void handleLegacyMapping(Builder builder, Map<String, Object> mapping) {
+        if (mapping.size() == 1) {
+            String mappingType = mapping.keySet().iterator().next();
+            builder.putMapping(new MappingMetadata(mappingType, mapping));
+        } else if (mapping.size() > 1) {
+            builder.putMapping(new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, mapping));
+        }
+    }
 }
 
 /**
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java
index cfe8a27074b60..ebd440dfc6c5c 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java
@@ -141,6 +141,16 @@ public Map getSourceAsMap() throws ElasticsearchParseException {
         return sourceAsMap();
     }
 
+    /**
+     * Converts the serialized compressed form of the mappings into a parsed map.
+     * In contrast to {@link #sourceAsMap()}, this does not remove the type
+     */
+    @SuppressWarnings("unchecked")
+    public Map<String, Object> rawSourceAsMap() throws ElasticsearchParseException {
+        Map<String, Object> mapping = XContentHelper.convertToMap(source.compressedReference(), true).v2();
+        return mapping;
+    }
+
     public boolean routingRequired() {
         return this.routingRequired;
     }
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
index 3950597155f82..f574fb5b00feb 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.IndexMetadataVerifier;
 import org.elasticsearch.cluster.metadata.IndexTemplateMetadata;
+import org.elasticsearch.cluster.metadata.MappingMetadata;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.metadata.MetadataCreateIndexService;
 import org.elasticsearch.cluster.metadata.MetadataDeleteIndexService;
@@ -78,6 +79,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -1294,6 +1296,11 @@ public ClusterState execute(ClusterState currentState) {
                         request.indexSettings(),
                         request.ignoreIndexSettings()
                     );
+                    if (snapshotIndexMetadata.getCreationVersion()
+                        .before(currentState.getNodes().getMaxNodeVersion().minimumIndexCompatibilityVersion())) {
+                        // adapt index metadata so that it can be understood by current version
+                        snapshotIndexMetadata = convertLegacyIndex(snapshotIndexMetadata);
+                    }
                     try {
                         snapshotIndexMetadata = indexMetadataVerifier.verifyIndexMetadata(snapshotIndexMetadata, minIndexCompatibilityVersion);
                     } catch (Exception ex) {
@@ -1582,6 +1589,32 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState)
         }
     }
 
+    private IndexMetadata convertLegacyIndex(IndexMetadata snapshotIndexMetadata) {
+        MappingMetadata mappingMetadata = snapshotIndexMetadata.mapping();
+        Map<String, Object> loadedMappingSource = mappingMetadata.rawSourceAsMap();
+
+        // store old mapping under _meta/legacy_mappings
+        Map<String, Object> legacyMapping = new LinkedHashMap<>();
+        boolean sourceOnlySnapshot = snapshotIndexMetadata.getSettings().getAsBoolean("index.source_only", false);
+        if (sourceOnlySnapshot) {
+            // actual mapping is under "_meta" (but strip type first)
+            Object sourceOnlyMeta = mappingMetadata.sourceAsMap().get("_meta");
+            if (sourceOnlyMeta instanceof Map<?, ?> sourceOnlyMetaMap) {
+                legacyMapping.put("legacy_mappings", sourceOnlyMetaMap);
+            }
+        } else {
+            legacyMapping.put("legacy_mappings", loadedMappingSource);
+        }
+
+        Map<String, Object> newMappingSource = new LinkedHashMap<>();
+        newMappingSource.put("_meta", legacyMapping);
+
+        Map<String, Object> newMapping = new LinkedHashMap<>();
+        newMapping.put(mappingMetadata.type(), newMappingSource);
+        // TODO: _routing? Perhaps we don't need to obey any routing here as stuff is read-only anyway and get API will be disabled
+        return IndexMetadata.builder(snapshotIndexMetadata).putMapping(new MappingMetadata(mappingMetadata.type(), newMapping)).build();
+    }
+
     private static IndexMetadata.Builder restoreToCreateNewIndex(IndexMetadata snapshotIndexMetadata, String renamedIndexName) {
         return IndexMetadata.builder(snapshotIndexMetadata)
             .state(IndexMetadata.State.OPEN)
diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java
index 3bddc60b36449..5b3fb0a331367 100644
--- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java
+++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java
@@ -25,11 +25,14 @@
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.client.RestHighLevelClient;
 import org.elasticsearch.client.indices.CloseIndexRequest;
+import org.elasticsearch.client.indices.GetMappingsRequest;
 import org.elasticsearch.client.indices.PutMappingRequest;
 import org.elasticsearch.client.searchable_snapshots.MountSnapshotRequest;
 import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.MappingMetadata;
+import org.elasticsearch.cluster.routing.Murmur3HashFunction;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
@@ -59,8 +62,13 @@
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.startsWith;
 
 public class OldRepositoryAccessIT extends ESRestTestCase {
     @Override
@@ -127,7 +135,9 @@ && randomBoolean()) {
             for (int i = 0; i < numDocs + extraDocs; i++) {
                 String id = "testdoc" + i;
                 expectedIds.add(id);
-                Request doc = new Request("PUT", "/test/doc/" + id);
+                // use multiple types for ES versions < 6.0.0
+                String type = "doc" + (oldVersion.before(Version.fromString("6.0.0")) ? Murmur3HashFunction.hash(id) % 2 : 0);
+                Request doc = new Request("PUT", "/test/" + type + "/" + id);
                 doc.addParameter("refresh", "true");
                 doc.setJsonEntity(sourceForDoc(i));
                 assertOK(oldEs.performRequest(doc));
@@ -136,7 +146,8 @@ && randomBoolean()) {
             for (int i = 0; i < extraDocs; i++) {
                 String id = randomFrom(expectedIds);
                 expectedIds.remove(id);
-                Request doc = new Request("DELETE", "/test/doc/" + id);
+                String type = "doc" + (oldVersion.before(Version.fromString("6.0.0")) ? Murmur3HashFunction.hash(id) % 2 : 0);
+                Request doc = new Request("DELETE", "/test/" + type + "/" + id);
                 doc.addParameter("refresh", "true");
                 oldEs.performRequest(doc);
             }
@@ -218,7 +229,7 @@ && randomBoolean()) {
 
             if (Build.CURRENT.isSnapshot()) {
                 // restore / mount and check whether searches work
-                restoreMountAndVerify(numDocs, expectedIds, client, numberOfShards, sourceOnlyRepository);
+                restoreMountAndVerify(numDocs, expectedIds, client, numberOfShards, sourceOnlyRepository, oldVersion);
 
                 // close indices
                 assertTrue(
@@ -236,7 +247,7 @@ && randomBoolean()) {
                 );
 
                 // restore / mount again
-                restoreMountAndVerify(numDocs, expectedIds, client, numberOfShards, sourceOnlyRepository);
+                restoreMountAndVerify(numDocs, expectedIds, client, numberOfShards, sourceOnlyRepository, oldVersion);
             }
         } finally {
             IOUtils.closeWhileHandlingException(
@@ -266,7 +277,8 @@ private void restoreMountAndVerify(
         int numDocs,
         Set<String> expectedIds,
        RestHighLevelClient client,
         int numberOfShards,
-        boolean sourceOnlyRepository
+        boolean sourceOnlyRepository,
+        Version oldVersion
     ) throws IOException {
         // restore index
         RestoreSnapshotResponse restoreSnapshotResponse = client.snapshot()
@@ -291,6 +303,39 @@ private void restoreMountAndVerify(
                 .getStatus()
         );
 
+        MappingMetadata mapping = client.indices()
+            .getMapping(new GetMappingsRequest().indices("restored_test"), RequestOptions.DEFAULT)
+            .mappings()
+            .get("restored_test");
+        logger.info("mapping for {}: {}", mapping.type(), mapping.source().string());
+        Map<String, Object> root = mapping.sourceAsMap();
+        assertThat(root, hasKey("_meta"));
+        assertThat(root.get("_meta"), instanceOf(Map.class));
+        @SuppressWarnings("unchecked")
+        Map<String, Object> meta = (Map<String, Object>) root.get("_meta");
+        assertThat(meta, hasKey("legacy_mappings"));
+        assertThat(meta.get("legacy_mappings"), instanceOf(Map.class));
+        @SuppressWarnings("unchecked")
+        Map<String, Object> legacyMappings = (Map<String, Object>) meta.get("legacy_mappings");
+        assertThat(legacyMappings.keySet(), not(empty()));
+        for (Map.Entry<String, Object> entry : legacyMappings.entrySet()) {
+            String type = entry.getKey();
+            assertThat(type, startsWith("doc"));
+            assertThat(entry.getValue(), instanceOf(Map.class));
+            @SuppressWarnings("unchecked")
+            Map<String, Object> legacyMapping = (Map<String, Object>) entry.getValue();
+            assertThat(legacyMapping, hasKey("properties"));
+            assertThat(legacyMapping.get("properties"), instanceOf(Map.class));
+            @SuppressWarnings("unchecked")
+            Map<String, Object> propertiesMapping = (Map<String, Object>) legacyMapping.get("properties");
+            assertThat(propertiesMapping, hasKey("val"));
+            assertThat(propertiesMapping.get("val"), instanceOf(Map.class));
+            @SuppressWarnings("unchecked")
+            Map<String, Object> valMapping = (Map<String, Object>) propertiesMapping.get("val");
+            assertThat(valMapping, hasKey("type"));
+            assertEquals("long", valMapping.get("type"));
+        }
+
         // run a search against the index
         assertDocs("restored_test", numDocs, expectedIds, client, sourceOnlyRepository);

From fd9887e58b2d849ee6254edce3d28f5f80f3d9cc Mon Sep 17 00:00:00 2001
From: Rene Groeschke
Date: Thu, 27 Jan 2022 09:57:47 +0100
Subject: [PATCH 063/100] Cleanup jackson build tool dependencies (#83164)

- use jackson platform
- use compatible schema validator version

---
 build-tools-internal/build.gradle | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle
index 9b647270e54ea..87a968558ab21 100644
--- a/build-tools-internal/build.gradle
+++ b/build-tools-internal/build.gradle
@@ -202,14 +202,9 @@ configurations {
   integTestRuntimeOnly.extendsFrom(testRuntimeOnly)
 }
 
 dependencies {
-  constraints {
-    api("com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.getProperty('jackson')}") {
-      version {
-        strictly "${versions.getProperty('jackson')}"
-      }
-      because 'We want to use the exact same jackson version we use in production'
-    }
-  }
+  components.all(JacksonAlignmentRule)
+  // Forcefully downgrade the jackson platform as used in production
+  api enforcedPlatform("com.fasterxml.jackson:jackson-bom:${versions.getProperty('jackson')}")
   api localGroovy()
   api gradleApi()
@@ -230,7 +225,8 @@ dependencies {
   api 'de.thetaphi:forbiddenapis:3.2'
   api 'com.avast.gradle:gradle-docker-compose-plugin:0.14.13'
   api 'org.apache.maven:maven-model:3.6.2'
-  api 'com.networknt:json-schema-validator:1.0.65'
+  // needs to match the jackson minor version in use
+  api 'com.networknt:json-schema-validator:1.0.49'
   api "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.getProperty('jackson')}"
   api 'org.ow2.asm:asm:9.2'
   api 'org.ow2.asm:asm-tree:9.2'
@@ -298,3 +294,14 @@ tasks.register("bootstrapPerformanceTests", Copy) {
   def root = file('..')
   filter(ReplaceTokens, tokens: [testGitCommit:GitInfo.gitInfo(root).revision])
 }
+
+abstract class JacksonAlignmentRule implements ComponentMetadataRule {
+  void execute(ComponentMetadataContext ctx) {
+    ctx.details.with {d ->
+      if (d.id.group.startsWith("com.fasterxml.jackson")) {
+        // declare that Jackson modules all belong to the Jackson virtual platform
+        belongsTo("com.fasterxml.jackson:jackson-bom:${d.id.version}")
+      }
+    }
+  }
+}
\ No newline at end of file

From 514bec19eb9ffc16bd2a161f0856baa5d671b93f Mon Sep 17 00:00:00 2001
From: Ioannis Kakavas
Date: Thu, 27 Jan 2022 11:03:08 +0200
Subject: [PATCH 064/100] Fix and unmute package upgrade tests (#83043)

Fixes PackageUpgradeTests

---
 .../packaging/test/PackageUpgradeTests.java   | 42 +++++++++++++++----
 ...ackagesSecurityAutoConfigurationTests.java | 22 +++++-----
 .../packaging/test/PackagingTestCase.java     | 33 ++++-----------
 3 files changed, 53 insertions(+), 44 deletions(-)

diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java
index 7275e9d6ca283..1aff7fc6ab791 100644
--- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java
+++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java
@@ -12,11 +12,16 @@
 import org.apache.http.entity.ContentType;
 import org.elasticsearch.Version;
 import org.elasticsearch.packaging.util.Distribution;
+import org.elasticsearch.packaging.util.FileUtils;
+import org.elasticsearch.packaging.util.Installation;
 import org.elasticsearch.packaging.util.Packages;
 import org.elasticsearch.packaging.util.ServerUtils;
 import org.junit.BeforeClass;
 
+import java.io.IOException;
+import java.nio.file.Files;
 import java.nio.file.Paths;
+import java.util.List;
 
 import static org.elasticsearch.packaging.util.Packages.assertInstalled;
 import static org.elasticsearch.packaging.util.Packages.installPackage;
@@ -41,9 +46,10 @@ public static void filterVersions() {
     public void test10InstallBwcVersion() throws Exception {
         installation = installPackage(sh, bwcDistribution);
         assertInstalled(bwcDistribution);
-        // TODO: Add more tests here to assert behavior when updating from < v8 to > v8 with implicit/explicit behavior,
-        // maybe as part of https://github.com/elastic/elasticsearch/pull/76879
-        ServerUtils.disableSecurityFeatures(installation);
+        // TODO Modify tests below to work with security when BWC version
is after 8.0.0 + if (Version.fromString(bwcDistribution.baseVersion).onOrAfter(Version.V_8_0_0)) { + possiblyRemoveSecurityConfiguration(installation); + } } public void test11ModifyKeystore() throws Exception { @@ -84,22 +90,24 @@ public void test12SetupBwcVersion() throws Exception { stopElasticsearch(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79950") public void test20InstallUpgradedVersion() throws Exception { if (bwcDistribution.path.equals(distribution.path)) { // the old and new distributions are the same, so we are testing force upgrading installation = Packages.forceUpgradePackage(sh, distribution); } else { installation = Packages.upgradePackage(sh, distribution); - verifySecurityNotAutoConfigured(installation); } + // We add this so that we don't trigger the SecurityImplicitBehaviorBootstrapCheck in 8 + if (Version.fromString(bwcDistribution.baseVersion).before(Version.V_8_0_0) + && Version.fromString(distribution.baseVersion).onOrAfter(Version.V_8_0_0)) { + ServerUtils.addSettingToExistingConfiguration(installation, "xpack.security.enabled", "false"); + } + assertInstalled(distribution); verifyPackageInstallation(installation, distribution, sh); - // Upgrade overwrites the configuration file because we run with --force-confnew so we need to disable security again - ServerUtils.disableSecurityFeatures(installation); + verifySecurityNotAutoConfigured(installation); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/76283") public void test21CheckUpgradedVersion() throws Exception { assertWhileRunning(() -> { assertDocsExist(); }); } @@ -112,4 +120,22 @@ private void assertDocsExist() throws Exception { String response3 = ServerUtils.makeRequest(Request.Get("http://localhost:9200/library2/_doc/1?pretty")); assertThat(response3, containsString("Darkness")); } + + private void possiblyRemoveSecurityConfiguration(Installation es) throws IOException { + ServerUtils.disableSecurityFeatures(es); + if (Files.exists(es.config("certs"))) { + FileUtils.rm(es.config("certs")); + } + // remove security auto-configuration entries, in case bwc was > 8, since we disable security + for (String entry : List.of( + "xpack.security.transport.ssl.keystore.secure_password", + "xpack.security.transport.ssl.truststore.secure_password", + "xpack.security.http.ssl.keystore.secure_password", + "autoconfiguration.password_hash" + )) { + if (es.executables().keystoreTool.run("list").stdout().contains(entry)) { + es.executables().keystoreTool.run("remove " + entry); + } + } + } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java index b84dd871157c3..fa68da1725edc 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java @@ -29,7 +29,6 @@ import java.nio.file.StandardCopyOption; import java.security.SecureRandom; import java.util.List; -import java.util.Optional; import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -52,12 +51,13 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static 
org.junit.Assume.assumeTrue; public class PackagesSecurityAutoConfigurationTests extends PackagingTestCase { + private static final String AUTOCONFIG_DIRNAME = "certs"; + @BeforeClass public static void filterDistros() { assumeTrue("rpm or deb", distribution.isPackage()); @@ -75,15 +75,13 @@ public void test10SecurityAutoConfiguredOnPackageInstall() throws Exception { public void test20SecurityNotAutoConfiguredOnReInstallation() throws Exception { // we are testing force upgrading in the current version // In such a case, security remains configured from the initial installation, we don't run it again. - Optional autoConfigDirName = getAutoConfigDirName(installation); + byte[] transportKeystore = Files.readAllBytes(installation.config(AUTOCONFIG_DIRNAME).resolve("transport.p12")); installation = Packages.forceUpgradePackage(sh, distribution); assertInstalled(distribution); verifyPackageInstallation(installation, distribution, sh); verifySecurityAutoConfigured(installation); - // Since we did not auto-configure the second time, the directory name should be the same - assertThat(autoConfigDirName.isPresent(), is(true)); - assertThat(getAutoConfigDirName(installation).isPresent(), is(true)); - assertThat(getAutoConfigDirName(installation).get(), equalTo(autoConfigDirName.get())); + // Since we did not auto-configure the second time, the keystore should be the one we generated the first time, above + assertThat(transportKeystore, equalTo(Files.readAllBytes(installation.config(AUTOCONFIG_DIRNAME).resolve("transport.p12")))); } public void test30SecurityNotAutoConfiguredWhenExistingDataDir() throws Exception { @@ -161,9 +159,8 @@ public void test70ReconfigureFailsWhenTlsAutoConfDirMissing() throws Exception { verifySecurityAutoConfigured(installation); assertNotNull(installation.getElasticPassword()); - Optional autoConfigDirName = getAutoConfigDirName(installation); // Move instead of delete because Files.deleteIfExists bails on non empty dirs - Files.move(installation.config(autoConfigDirName.get()), installation.config("temp-autoconf-dir")); + Files.move(installation.config(AUTOCONFIG_DIRNAME), installation.config("temp-autoconf-dir")); Shell.Result result = installation.executables().nodeReconfigureTool.run("--enrollment-token a-token", "y", true); assertThat(result.exitCode(), equalTo(ExitCodes.USAGE)); // } @@ -312,10 +309,13 @@ public void test73ReconfigureCreatesFilesWithCorrectPermissions() throws Excepti true ); assertThat(result.exitCode(), CoreMatchers.equalTo(0)); - assertThat(installation.config("certs"), FileMatcher.file(Directory, "root", "elasticsearch", p750)); + assertThat(installation.config(AUTOCONFIG_DIRNAME), FileMatcher.file(Directory, "root", "elasticsearch", p750)); Stream.of("http.p12", "http_ca.crt", "transport.p12") .forEach( - file -> assertThat(installation.config("certs").resolve(file), FileMatcher.file(File, "root", "elasticsearch", p660)) + file -> assertThat( + installation.config(AUTOCONFIG_DIRNAME).resolve(file), + FileMatcher.file(File, "root", "elasticsearch", p660) + ) ); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index 644219572e4e3..fa8053ba5d5bf 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -63,11 +63,9 @@ import java.nio.file.attribute.FileAttribute; import 
java.nio.file.attribute.PosixFilePermissions; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; -import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -629,22 +627,21 @@ public static void assertBusy(CheckedRunnable codeBlock, long maxWait * @param es the {@link Installation} to check */ public void verifySecurityAutoConfigured(Installation es) throws Exception { - Optional autoConfigDirName = getAutoConfigDirName(es); - assertThat(autoConfigDirName.isPresent(), Matchers.is(true)); + final String autoConfigDirName = "certs"; final Settings settings; if (es.distribution.isArchive()) { // We chown the installation on Windows to Administrators so that we can auto-configure it. String owner = Platforms.WINDOWS ? "BUILTIN\\Administrators" : "elasticsearch"; - assertThat(es.config(autoConfigDirName.get()), FileMatcher.file(Directory, owner, owner, p750)); + assertThat(es.config(autoConfigDirName), FileMatcher.file(Directory, owner, owner, p750)); Stream.of("http.p12", "http_ca.crt", "transport.p12") - .forEach(file -> assertThat(es.config(autoConfigDirName.get()).resolve(file), FileMatcher.file(File, owner, owner, p660))); + .forEach(file -> assertThat(es.config(autoConfigDirName).resolve(file), FileMatcher.file(File, owner, owner, p660))); settings = Settings.builder().loadFromPath(es.config("elasticsearch.yml")).build(); } else if (es.distribution.isDocker()) { - assertThat(es.config(autoConfigDirName.get()), DockerFileMatcher.file(Directory, "elasticsearch", "root", p750)); + assertThat(es.config(autoConfigDirName), DockerFileMatcher.file(Directory, "elasticsearch", "root", p750)); Stream.of("http.p12", "http_ca.crt", "transport.p12") .forEach( file -> assertThat( - es.config(autoConfigDirName.get()).resolve(file), + es.config(autoConfigDirName).resolve(file), DockerFileMatcher.file(File, "elasticsearch", "root", p660) ) ); @@ -655,13 +652,10 @@ public void verifySecurityAutoConfigured(Installation es) throws Exception { rm(localTempDir); } else { assert es.distribution.isPackage(); - assertThat(es.config(autoConfigDirName.get()), FileMatcher.file(Directory, "root", "elasticsearch", p750)); + assertThat(es.config(autoConfigDirName), FileMatcher.file(Directory, "root", "elasticsearch", p750)); Stream.of("http.p12", "http_ca.crt", "transport.p12") .forEach( - file -> assertThat( - es.config(autoConfigDirName.get()).resolve(file), - FileMatcher.file(File, "root", "elasticsearch", p660) - ) + file -> assertThat(es.config(autoConfigDirName).resolve(file), FileMatcher.file(File, "root", "elasticsearch", p660)) ); assertThat( sh.run(es.executables().keystoreTool + " list").stdout(), @@ -687,7 +681,7 @@ public void verifySecurityAutoConfigured(Installation es) throws Exception { * @param es the {@link Installation} to check */ public static void verifySecurityNotAutoConfigured(Installation es) throws Exception { - assertThat(getAutoConfigDirName(es).isPresent(), Matchers.is(false)); + assertThat(Files.exists(es.config("certs")), Matchers.is(false)); if (es.distribution.isPackage()) { if (Files.exists(es.config("elasticsearch.keystore"))) { assertThat( @@ -707,15 +701,4 @@ public static void verifySecurityNotAutoConfigured(Installation es) throws Excep } } - public static Optional getAutoConfigDirName(Installation es) { - final Shell.Result lsResult; - if (es.distribution.platform.equals(Distribution.Platform.WINDOWS)) { - lsResult = 
sh.run("Get-ChildItem -Path " + es.config + " -Name");
-        } else {
-            lsResult = sh.run("find \"" + es.config + "\" -type d -maxdepth 1");
-        }
-        assertNotNull(lsResult.stdout());
-        return Arrays.stream(lsResult.stdout().split("\n")).filter(f -> f.contains("certs")).findFirst();
-    }
-
 }

From 847dd43f941b83b4491d0ed0ccf5eac9184386cd Mon Sep 17 00:00:00 2001
From: David Turner
Date: Thu, 27 Jan 2022 09:06:47 +0000
Subject: [PATCH 065/100] Fix NodeMetadataTests#testEqualsHashcodeSerialization
 (#83170)

The mutation function would rarely fail to mutate the input object. This
commit fixes that.

Relates #82689
---
 .../elasticsearch/env/NodeMetadataTests.java | 37 ++++++++-----------
 1 file changed, 16 insertions(+), 21 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java
index 0f10ea2b6548c..64148b9d2afd5 100644
--- a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java
+++ b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java
@@ -46,27 +46,22 @@ public void testEqualsHashcodeSerialization() {
                 assertThat(nodeMetadataLongTuple.v2(), equalTo(generation));
                 return nodeMetadataLongTuple.v1();
             },
-            nodeMetadata -> {
-                switch (randomInt(3)) {
-                    case 0:
-                        return new NodeMetadata(
-                            randomAlphaOfLength(21 - nodeMetadata.nodeId().length()),
-                            nodeMetadata.nodeVersion(),
-                            Version.CURRENT
-                        );
-                    case 1:
-                        return new NodeMetadata(
-                            nodeMetadata.nodeId(),
-                            randomValueOtherThan(nodeMetadata.nodeVersion(), this::randomVersion),
-                            Version.CURRENT
-                        );
-                    default:
-                        return new NodeMetadata(
-                            nodeMetadata.nodeId(),
-                            nodeMetadata.nodeVersion(),
-                            randomValueOtherThan(Version.CURRENT, this::randomVersion)
-                        );
-                }
+            nodeMetadata -> switch (randomInt(3)) {
+                case 0 -> new NodeMetadata(
+                    randomAlphaOfLength(21 - nodeMetadata.nodeId().length()),
+                    nodeMetadata.nodeVersion(),
+                    nodeMetadata.oldestIndexVersion()
+                );
+                case 1 -> new NodeMetadata(
+                    nodeMetadata.nodeId(),
+                    randomValueOtherThan(nodeMetadata.nodeVersion(), this::randomVersion),
+                    nodeMetadata.oldestIndexVersion()
+                );
+                default -> new NodeMetadata(
+                    nodeMetadata.nodeId(),
+                    nodeMetadata.nodeVersion(),
+                    randomValueOtherThan(nodeMetadata.oldestIndexVersion(), this::randomVersion)
+                );
             }
         );
     }

From 1e1f57d0945fd66d927c77f584406b4b282458ee Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Thu, 27 Jan 2022 10:59:06 +0100
Subject: [PATCH 066/100] Add index_mode to data streams (#82621)

This adds an `index_mode` field to the data stream snippet in a composable
index template. Specifying `index_mode` is optional.

If `index_mode` is set on a composable index template, then this also sets
the `index_mode` property on the data stream and the `index.mode` index
setting on the backing indices created by this data stream.

An `index_mode` of value `TIME_SERIES` on a data stream also triggers time
series validation. In particular, the temporal ranges of backing indices
are checked to ensure that they do not overlap.
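As an illustrative sketch (the template name, index pattern and field names
below are made up for this example and are not part of this change), a
composable index template would opt its data stream into time series mode
via the new field in the `data_stream` snippet:

    PUT _index_template/my-metrics-template
    {
      "index_patterns": ["metrics-example-*"],
      "data_stream": {
        "index_mode": "time_series"
      },
      "template": {
        "settings": {
          "index.routing_path": ["metricset"]
        },
        "mappings": {
          "properties": {
            "@timestamp": { "type": "date" },
            "metricset": { "type": "keyword", "time_series_dimension": true }
          }
        }
      }
    }

Backing indices created for such a data stream then get the
`index.mode: time_series` index setting, and their
`index.time_series.start_time` / `index.time_series.end_time` ranges are
validated as described above.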
Relates to #74660
---
 .../CreateIndexClusterStateUpdateRequest.java |  20 +++
 .../rollover/MetadataRolloverService.java     |   6 +-
 .../TransportSimulateIndexTemplateAction.java |   1 +
 .../metadata/ComposableIndexTemplate.java     |  34 ++++-
 .../cluster/metadata/DataStream.java          | 115 ++++++++++++++---
 .../cluster/metadata/IndexAbstraction.java    |   3 +-
 .../cluster/metadata/Metadata.java            |  13 +-
 .../MetadataCreateDataStreamService.java      |   8 +-
 .../metadata/MetadataCreateIndexService.java  |   8 ++
 .../MetadataIndexTemplateService.java         |   1 +
 .../cluster/routing/allocation/DataTier.java  |   2 +
 .../org/elasticsearch/index/IndexMode.java    |  35 +++++-
 .../index/IndexSettingProvider.java           |  10 +-
 .../snapshots/RestoreService.java             |   3 +-
 .../metadata/DataStreamTemplateTests.java     |   4 +-
 .../cluster/metadata/DataStreamTests.java     |  92 +++++++++++++-
 .../IndexNameExpressionResolverTests.java     |   3 +-
 .../metadata/DataStreamTestHelper.java        |  20 ++-
 .../ccr/action/TransportPutFollowAction.java  |   6 +-
 .../action/AutoFollowCoordinatorTests.java    |   3 +-
 .../action/TransportPutFollowActionTests.java |  16 ++-
 .../core/ilm/LifecyclePolicyUtilsTests.java   |   4 +-
 .../datastreams/TsdbDataStreamRestIT.java     |   5 +-
 .../datastreams/DataStreamIT.java             |  10 +-
 .../DataStreamIndexSettingsProvider.java      |  60 ++++-----
 .../DataStreamIndexSettingsProviderTests.java | 117 ++++++++++++------
 .../datastreams/DataStreamsStatsTests.java    |   2 +-
 ...etadataDataStreamRolloverServiceTests.java |  14 ++-
 .../rollup/v2/TransportRollupAction.java      |   3 +-
 29 files changed, 490 insertions(+), 128 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
index a53ff9133cda3..9e3cb34e9a5df 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
 import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
@@ -48,6 +49,8 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
 
     private boolean performReroute = true;
 
+    private ComposableIndexTemplate matchingTemplate;
+
     public CreateIndexClusterStateUpdateRequest(String cause, String index, String providedName) {
         this.cause = cause;
         this.index = index;
@@ -186,6 +189,21 @@ public CreateIndexClusterStateUpdateRequest performReroute(boolean performRerout
         return this;
     }
 
+    /**
+     * @return The composable index template that matches with the index that will be created by this request.
+     */
+    public ComposableIndexTemplate matchingTemplate() {
+        return matchingTemplate;
+    }
+
+    /**
+     * Sets the composable index template that matches with the index that will be created by this request.
+ */ + public CreateIndexClusterStateUpdateRequest setMatchingTemplate(ComposableIndexTemplate matchingTemplate) { + this.matchingTemplate = matchingTemplate; + return this; + } + @Override public String toString() { return "CreateIndexClusterStateUpdateRequest{" @@ -217,6 +235,8 @@ public String toString() { + waitForActiveShards + ", systemDataStreamDescriptor=" + systemDataStreamDescriptor + + ", matchingTemplate=" + + matchingTemplate + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index 4f08d6a50eda4..a0b0f7341ce10 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.AliasMetadata; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -242,15 +243,17 @@ private RolloverResult rolloverDataStream( ); } + final ComposableIndexTemplate templateV2; final SystemDataStreamDescriptor systemDataStreamDescriptor; if (dataStream.isSystem() == false) { systemDataStreamDescriptor = null; - lookupTemplateForDataStream(dataStreamName, currentState.metadata()); + templateV2 = lookupTemplateForDataStream(dataStreamName, currentState.metadata()); } else { systemDataStreamDescriptor = systemIndices.findMatchingDataStreamDescriptor(dataStreamName); if (systemDataStreamDescriptor == null) { throw new IllegalArgumentException("no system data stream descriptor found for data stream [" + dataStreamName + "]"); } + templateV2 = systemDataStreamDescriptor.getComposableIndexTemplate(); } final DataStream ds = dataStream.getDataStream(); @@ -270,6 +273,7 @@ private RolloverResult rolloverDataStream( systemDataStreamDescriptor, now ); + createIndexClusterStateRequest.setMatchingTemplate(templateV2); ClusterState newState = createIndexService.applyCreateIndexRequest( currentState, createIndexClusterStateRequest, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index d72e28058c8f4..4e4abf003e6b4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -213,6 +213,7 @@ public static Template resolveTemplate( Settings result = provider.getAdditionalIndexSettings( indexName, template.getDataStreamTemplate() != null ? indexName : null, + template.getDataStreamTemplate() != null ? 
template.getDataStreamTemplate().getIndexMode() : null, simulatedState.getMetadata(), System.currentTimeMillis(), templateSettings diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index f7f5f84ab93b4..cd6edd507dc95 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -305,28 +306,35 @@ public static class DataStreamTemplate implements Writeable, ToXContentObject { private static final ParseField HIDDEN = new ParseField("hidden"); private static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing"); + private static final ParseField INDEX_MODE = new ParseField("index_mode"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_stream_template", false, - a -> new DataStreamTemplate(a[0] != null && (boolean) a[0], a[1] != null && (boolean) a[1]) + args -> { + IndexMode indexMode = args[2] != null ? IndexMode.fromString((String) args[2]) : null; + return new DataStreamTemplate(args[0] != null && (boolean) args[0], args[1] != null && (boolean) args[1], indexMode); + } ); static { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), HIDDEN); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), INDEX_MODE); } private final boolean hidden; private final boolean allowCustomRouting; + private final IndexMode indexMode; public DataStreamTemplate() { - this(false, false); + this(false, false, null); } - public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) { + public DataStreamTemplate(boolean hidden, boolean allowCustomRouting, IndexMode indexMode) { this.hidden = hidden; this.allowCustomRouting = allowCustomRouting; + this.indexMode = indexMode; } DataStreamTemplate(StreamInput in) throws IOException { @@ -336,6 +344,11 @@ public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) { } else { allowCustomRouting = false; } + if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + indexMode = in.readOptionalEnum(IndexMode.class); + } else { + indexMode = null; + } } public String getTimestampField() { @@ -368,12 +381,20 @@ public boolean isAllowCustomRouting() { return allowCustomRouting; } + @Nullable + public IndexMode getIndexMode() { + return indexMode; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(hidden); if (out.getVersion().onOrAfter(Version.V_8_0_0)) { out.writeBoolean(allowCustomRouting); } + if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + out.writeOptionalEnum(indexMode); + } } @Override @@ -381,6 +402,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("hidden", hidden); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting); + if (indexMode != null) { + 
builder.field(INDEX_MODE.getPreferredName(), indexMode);
+            }
             builder.endObject();
             return builder;
         }
@@ -390,12 +414,12 @@ public boolean equals(Object o) {
             if (this == o) return true;
             if (o == null || getClass() != o.getClass()) return false;
             DataStreamTemplate that = (DataStreamTemplate) o;
-            return hidden == that.hidden && allowCustomRouting == that.allowCustomRouting;
+            return hidden == that.hidden && allowCustomRouting == that.allowCustomRouting && indexMode == that.indexMode;
         }
 
         @Override
         public int hashCode() {
-            return Objects.hash(hidden, allowCustomRouting);
+            return Objects.hash(hidden, allowCustomRouting, indexMode);
         }
     }
 
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
index dc4cbe79f9fb4..4e830cd95a0ae 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexMode;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.xcontent.ConstructingObjectParser;
 import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContentObject;
@@ -81,6 +82,7 @@ public final class DataStream extends AbstractDiffable implements To
     private final boolean replicated;
     private final boolean system;
     private final boolean allowCustomRouting;
+    private final IndexMode indexMode;
 
     public DataStream(
         String name,
@@ -91,7 +93,8 @@ public DataStream(
         boolean hidden,
         boolean replicated,
         boolean system,
-        boolean allowCustomRouting
+        boolean allowCustomRouting,
+        IndexMode indexMode
     ) {
         this(
             name,
@@ -103,7 +106,8 @@ public DataStream(
             replicated,
             system,
             System::currentTimeMillis,
-            allowCustomRouting
+            allowCustomRouting,
+            indexMode
         );
     }
 
@@ -118,7 +122,8 @@ public DataStream(
         boolean replicated,
         boolean system,
         LongSupplier timeProvider,
-        boolean allowCustomRouting
+        boolean allowCustomRouting,
+        IndexMode indexMode
     ) {
         this.name = name;
         this.timeStampField = timeStampField;
@@ -131,6 +136,7 @@ public DataStream(
         this.timeProvider = timeProvider;
         this.system = system;
         this.allowCustomRouting = allowCustomRouting;
+        this.indexMode = indexMode;
         assert indices.size() > 0;
     }
 
@@ -177,13 +183,50 @@ public Index selectTimeSeriesWriteIndex(Instant timestamp, Metadata metadata) {
         return null;
     }
 
-    public boolean isTimeSeries(Function indices) {
-        return isTimeSeries(indices.apply(getWriteIndex()));
-    }
-
-    public boolean isTimeSeries(IndexMetadata indexMetadata) {
-        IndexMode indexMode = IndexSettings.MODE.get(indexMetadata.getSettings());
-        return indexMode == IndexMode.TIME_SERIES;
+    /**
+     * Validates this data stream. If this is a time series data stream then this method validates that the temporal ranges
+     * of backing indices (defined by index.time_series.start_time and index.time_series.end_time) do not overlap with each other.
+     *
+     * @param imSupplier Function that supplies {@link IndexMetadata} instances based on the provided index name
+     */
+    public void validate(Function imSupplier) {
+        if (indexMode == IndexMode.TIME_SERIES) {
+            // Get a sorted overview of each backing index with their start and end time range:
+            var startAndEndTimes = indices.stream().map(index -> imSupplier.apply(index.getName())).map(im -> {
+                Instant start = IndexSettings.TIME_SERIES_START_TIME.get(im.getSettings());
+                Instant end = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings());
+                assert end.isAfter(start); // This is also validated by TIME_SERIES_END_TIME setting.
+                return new Tuple<>(im.getIndex().getName(), new Tuple<>(start, end));
+            })
+                .sorted(Comparator.comparing(entry -> entry.v2().v1())) // Sort by start time
+                .collect(Collectors.toList());
+
+            Tuple> previous = null;
+            var formatter = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER;
+            for (var current : startAndEndTimes) {
+                if (previous == null) {
+                    previous = current;
+                } else {
+                    // The end_time of the previous backing index should be equal to or less than the start_time of the current backing index.
+                    // If previous.end_time > current.start_time then we should fail here:
+                    if (previous.v2().v2().compareTo(current.v2().v1()) > 0) {
+                        String range1 = formatter.format(previous.v2().v1()) + " TO " + formatter.format(previous.v2().v2());
+                        String range2 = formatter.format(current.v2().v1()) + " TO " + formatter.format(current.v2().v2());
+                        throw new IllegalArgumentException(
+                            "backing index ["
+                                + previous.v1()
+                                + "] with range ["
+                                + range1
+                                + "] is overlapping with backing index ["
+                                + current.v1()
+                                + "] with range ["
+                                + range2
+                                + "]"
+                        );
+                    }
+                }
+            }
+        }
     }
 
     @Nullable
@@ -213,6 +256,11 @@ public boolean isAllowCustomRouting() {
         return allowCustomRouting;
     }
 
+    @Nullable
+    public IndexMode getIndexMode() {
+        return indexMode;
+    }
+
     /**
      * Performs a rollover on a {@code DataStream} instance and returns a new instance containing
      * the updated list of backing indices and incremented generation.
@@ -227,7 +275,18 @@ public DataStream rollover(Index writeIndex, long generation) {
         List backingIndices = new ArrayList<>(indices);
         backingIndices.add(writeIndex);
-        return new DataStream(name, timeStampField, backingIndices, generation, metadata, hidden, false, system, allowCustomRouting);
+        return new DataStream(
+            name,
+            timeStampField,
+            backingIndices,
+            generation,
+            metadata,
+            hidden,
+            false,
+            system,
+            allowCustomRouting,
+            indexMode
+        );
     }
 
     /**
@@ -294,7 +353,8 @@ public DataStream removeBackingIndex(Index index) {
             hidden,
             replicated,
             system,
-            allowCustomRouting
+            allowCustomRouting,
+            indexMode
         );
     }
 
@@ -336,7 +396,8 @@ public DataStream replaceBackingIndex(Index existingBackingIndex, Index newBacki
             hidden,
             replicated,
             system,
-            allowCustomRouting
+            allowCustomRouting,
+            indexMode
         );
     }
 
@@ -395,7 +456,8 @@ public DataStream addBackingIndex(Metadata clusterMetadata, Index index) {
             hidden,
             replicated,
             system,
-            allowCustomRouting
+            allowCustomRouting,
+            indexMode
         );
     }
 
@@ -410,7 +472,8 @@ public DataStream promoteDataStream() {
             false,
             system,
             timeProvider,
-            allowCustomRouting
+            allowCustomRouting,
+            indexMode
         );
     }
 
@@ -443,7 +506,8 @@ public DataStream snapshot(Collection indicesInSnapshot) {
             hidden,
             replicated,
             system,
-            allowCustomRouting
+            allowCustomRouting,
+            indexMode
         );
     }
 
@@ -488,7 +552,8 @@ public DataStream(StreamInput in) throws IOException {
             in.readBoolean(),
             in.readBoolean(),
             in.readBoolean(),
-            in.getVersion().onOrAfter(Version.V_8_0_0) ?
in.readBoolean() : false + in.getVersion().onOrAfter(Version.V_8_0_0) ? in.readBoolean() : false, + in.getVersion().onOrAfter(Version.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null ); } @@ -509,6 +574,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_8_0_0)) { out.writeBoolean(allowCustomRouting); } + if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + out.writeOptionalEnum(indexMode); + } } public static final ParseField NAME_FIELD = new ParseField("name"); @@ -520,6 +588,7 @@ public void writeTo(StreamOutput out) throws IOException { public static final ParseField REPLICATED_FIELD = new ParseField("replicated"); public static final ParseField SYSTEM_FIELD = new ParseField("system"); public static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing"); + public static final ParseField INDEX_MODE = new ParseField("index_mode"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -533,7 +602,8 @@ public void writeTo(StreamOutput out) throws IOException { args[5] != null && (boolean) args[5], args[6] != null && (boolean) args[6], args[7] != null && (boolean) args[7], - args[8] != null && (boolean) args[8] + args[8] != null && (boolean) args[8], + args[9] != null ? IndexMode.fromString((String) args[9]) : null ) ); @@ -547,6 +617,7 @@ public void writeTo(StreamOutput out) throws IOException { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), REPLICATED_FIELD); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), SYSTEM_FIELD); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), INDEX_MODE); } public static DataStream fromXContent(XContentParser parser) throws IOException { @@ -567,6 +638,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(REPLICATED_FIELD.getPreferredName(), replicated); builder.field(SYSTEM_FIELD.getPreferredName(), system); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting); + if (indexMode != null) { + builder.field(INDEX_MODE.getPreferredName(), indexMode); + } builder.endObject(); return builder; } @@ -583,12 +657,13 @@ public boolean equals(Object o) { && Objects.equals(metadata, that.metadata) && hidden == that.hidden && replicated == that.replicated - && allowCustomRouting == that.allowCustomRouting; + && allowCustomRouting == that.allowCustomRouting + && indexMode == that.indexMode; } @Override public int hashCode() { - return Objects.hash(name, timeStampField, indices, generation, metadata, hidden, replicated, allowCustomRouting); + return Objects.hash(name, timeStampField, indices, generation, metadata, hidden, replicated, allowCustomRouting, indexMode); } public static final class TimestampField implements Writeable, ToXContentObject { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java index cb9489b7745c1..7e60d3b8bdcbb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; +import 
org.elasticsearch.index.IndexMode;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.xcontent.XContent;
@@ -444,7 +445,7 @@ public Index getWriteIndex(IndexRequest request, Metadata metadata) {
             return getWriteIndex();
         }
 
-        if (dataStream.isTimeSeries(metadata::index) == false) {
+        if (dataStream.getIndexMode() != IndexMode.TIME_SERIES) {
             return getWriteIndex();
         }
 
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java
index 25ddbbccd9c3a..ad1d2af07e9ac 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java
@@ -1352,14 +1352,25 @@ public DataStream dataStream(String dataStreamName) {
         public Builder dataStreams(Map dataStreams, Map dataStreamAliases) {
             previousIndicesLookup = null;
 
+            // Only perform data stream validation when data streams are modified in Metadata:
+            for (DataStream dataStream : dataStreams.values()) {
+                dataStream.validate(indices::get);
+            }
+
             this.customs.put(DataStreamMetadata.TYPE, new DataStreamMetadata(dataStreams, dataStreamAliases));
             return this;
         }
 
         public Builder put(DataStream dataStream) {
             previousIndicesLookup = null;
-
             Objects.requireNonNull(dataStream, "it is invalid to add a null data stream");
+
+            // Every time the backing indices of a data stream are modified, a new instance will be created and
+            // that instance needs to be added here. So this is a good place to do data stream validation for
+            // the data stream and all of its backing indices. Doing this validation in the build() method would
+            // trigger this validation on each new Metadata creation, even if there are no changes to data streams.
+ dataStream.validate(indices::get); + Map existingDataStreams = Optional.ofNullable( (DataStreamMetadata) this.customs.get(DataStreamMetadata.TYPE) ).map(dsmd -> new HashMap<>(dsmd.dataStreams())).orElse(new HashMap<>()); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index c858c680f8185..5d8b092d9a17e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -214,7 +215,8 @@ static ClusterState createDataStream( ).dataStreamName(dataStreamName) .systemDataStreamDescriptor(systemDataStreamDescriptor) .nameResolvedInstant(request.startTime) - .performReroute(request.performReroute()); + .performReroute(request.performReroute()) + .setMatchingTemplate(template); if (isSystem) { createIndexRequest.settings(SystemIndexDescriptor.DEFAULT_SETTINGS); @@ -245,6 +247,7 @@ static ClusterState createDataStream( List dsBackingIndices = backingIndices.stream().map(IndexMetadata::getIndex).collect(Collectors.toList()); dsBackingIndices.add(writeIndex.getIndex()); boolean hidden = isSystem || template.getDataStreamTemplate().isHidden(); + final IndexMode indexMode = template.getDataStreamTemplate().getIndexMode(); DataStream newDataStream = new DataStream( dataStreamName, timestampField, @@ -254,7 +257,8 @@ static ClusterState createDataStream( hidden, false, isSystem, - template.getDataStreamTemplate().isAllowCustomRouting() + template.getDataStreamTemplate().isAllowCustomRouting(), + indexMode ); Metadata.Builder builder = Metadata.builder(currentState.metadata()).put(newDataStream); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index a6c9cb3db42da..2cf0cde203ad7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -83,6 +83,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; @@ -845,6 +846,12 @@ static Settings aggregateIndexSettings( final Settings.Builder additionalIndexSettings = Settings.builder(); final Settings templateAndRequestSettings = Settings.builder().put(combinedTemplateSettings).put(request.settings()).build(); + final IndexMode matchingIndexMode = Optional.of(request) + .map(CreateIndexClusterStateUpdateRequest::matchingTemplate) + .map(ComposableIndexTemplate::getDataStreamTemplate) + .map(ComposableIndexTemplate.DataStreamTemplate::getIndexMode) + .orElse(null); + // Loop through all the explicit index setting providers, adding them to the // additionalIndexSettings map for (IndexSettingProvider provider : indexSettingProviders) { @@ -852,6 +859,7 @@ static Settings 
aggregateIndexSettings( provider.getAdditionalIndexSettings( request.index(), request.dataStreamName(), + matchingIndexMode, currentState.getMetadata(), request.getNameResolvedAt(), templateAndRequestSettings diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 745e415c1ed62..14dfb59ab36bf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -633,6 +633,7 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT provider.getAdditionalIndexSettings( "validate-index-name", indexTemplate.getDataStreamTemplate() != null ? "validate-data-stream-name" : null, + indexTemplate.getDataStreamTemplate() != null ? indexTemplate.getDataStreamTemplate().getIndexMode() : null, currentState.getMetadata(), System.currentTimeMillis(), finalTemplate.map(Template::settings).orElse(Settings.EMPTY) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DataTier.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DataTier.java index 5558773927a67..c68f285bb4b13 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DataTier.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DataTier.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.snapshots.SearchableSnapshotsSettings; @@ -234,6 +235,7 @@ public static class DefaultHotAllocationSettingProvider implements IndexSettingP public Settings getAdditionalIndexSettings( String indexName, String dataStreamName, + IndexMode templateIndexMode, Metadata metadata, long resolvedAt, Settings allSettings diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 8e236fd1a36e2..3284505c0481e 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -23,9 +23,11 @@ import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; import java.util.stream.Stream; import static java.util.stream.Collectors.toSet; @@ -38,7 +40,7 @@ * to be set or not set and by enabling extra fields in the mapping. 
*/ public enum IndexMode { - STANDARD { + STANDARD("standard") { @Override void validateWithOtherSettings(Map, Object> settings) { settingRequiresTimeSeries(settings, IndexMetadata.INDEX_ROUTING_PATH); @@ -76,7 +78,7 @@ public MetadataFieldMapper buildTimeSeriesIdFieldMapper() { return null; } }, - TIME_SERIES { + TIME_SERIES("time_series") { @Override void validateWithOtherSettings(Map, Object> settings) { if (settings.get(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING) != Integer.valueOf(1)) { @@ -180,6 +182,16 @@ public MetadataFieldMapper buildTimeSeriesIdFieldMapper() { ).collect(toSet()) ); + private final String name; + + IndexMode(String name) { + this.name = name; + } + + public String getName() { + return name; + } + abstract void validateWithOtherSettings(Map, Object> settings); /** @@ -209,4 +221,23 @@ public MetadataFieldMapper buildTimeSeriesIdFieldMapper() { * field mappers for the index. */ public abstract MetadataFieldMapper buildTimeSeriesIdFieldMapper(); + + public static IndexMode fromString(String value) { + return switch (value) { + case "standard" -> IndexMode.STANDARD; + case "time_series" -> IndexMode.TIME_SERIES; + default -> throw new IllegalArgumentException( + "[" + + value + + "] is an invalid index mode, valid modes are: [" + + Arrays.stream(IndexMode.values()).map(IndexMode::toString).collect(Collectors.joining()) + + "]" + ); + }; + } + + @Override + public String toString() { + return getName(); + } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java b/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java index ced531a615a3d..7837feb3eed94 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java @@ -22,9 +22,17 @@ public interface IndexSettingProvider { * * @param indexName The name of the new index being created * @param dataStreamName The name of the data stream if the index being created is part of a data stream otherwise null + * @param templateIndexMode The index mode from the data stream template of the matching template. * @param metadata The current metadata instance that doesn't yet contain the index to be created * @param resolvedAt The time the request to create this new index was accepted. 
* @param allSettings All the setting resolved from the template that matches and any setting defined on the create index request */ - Settings getAdditionalIndexSettings(String indexName, String dataStreamName, Metadata metadata, long resolvedAt, Settings allSettings); + Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + long resolvedAt, + Settings allSettings + ); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index f574fb5b00feb..8c0fc498a1e67 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -699,7 +699,8 @@ static DataStream updateDataStream(DataStream dataStream, Metadata.Builder metad dataStream.isHidden(), dataStream.isReplicated(), dataStream.isSystem(), - dataStream.isAllowCustomRouting() + dataStream.isAllowCustomRouting(), + dataStream.getIndexMode() ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java index 323951dece0df..38b68df728e81 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplate.DataStreamTemplate; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -32,7 +33,8 @@ protected DataStreamTemplate createTestInstance() { } public static DataStreamTemplate randomInstance() { - return new ComposableIndexTemplate.DataStreamTemplate(randomBoolean(), randomBoolean()); + IndexMode indexMode = randomBoolean() ? 
randomFrom(IndexMode.values()) : null;
+        return new ComposableIndexTemplate.DataStreamTemplate(randomBoolean(), randomBoolean(), indexMode);
     }
 
 }

diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java
index a6256fb88a544..472d69b19c619 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java
@@ -14,6 +14,8 @@
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.test.AbstractSerializingTestCase;
 import org.elasticsearch.xcontent.XContentParser;
 
@@ -33,8 +35,10 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.everyItem;
 import static org.hamcrest.Matchers.hasItems;
+import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.in;
 import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
 
 public class DataStreamTests extends AbstractSerializingTestCase {
 
@@ -437,7 +441,8 @@ public void testSnapshot() {
             preSnapshotDataStream.isHidden(),
             preSnapshotDataStream.isReplicated() && randomBoolean(),
             preSnapshotDataStream.isSystem(),
-            preSnapshotDataStream.isAllowCustomRouting()
+            preSnapshotDataStream.isAllowCustomRouting(),
+            preSnapshotDataStream.getIndexMode()
         );
 
         var reconciledDataStream = postSnapshotDataStream.snapshot(
@@ -479,7 +484,8 @@ public void testSnapshotWithAllBackingIndicesRemoved() {
             preSnapshotDataStream.isHidden(),
             preSnapshotDataStream.isReplicated(),
             preSnapshotDataStream.isSystem(),
-            preSnapshotDataStream.isAllowCustomRouting()
+            preSnapshotDataStream.isAllowCustomRouting(),
+            preSnapshotDataStream.getIndexMode()
         );
 
         assertNull(
@@ -519,4 +525,86 @@ public void testSelectTimeSeriesWriteIndex() {
         assertThat(result.getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 1, start1.toEpochMilli())));
     }
 
+    public void testValidate() {
+        {
+            // Valid cases:
+            Instant currentTime = Instant.now().truncatedTo(ChronoUnit.MILLIS);
+
+            // These ranges are on the edge of each other's temporal boundaries.
+            Instant start1 = currentTime.minus(6, ChronoUnit.HOURS);
+            Instant end1 = currentTime.minus(2, ChronoUnit.HOURS);
+            Instant start2 = currentTime.minus(2, ChronoUnit.HOURS);
+            Instant end2 = currentTime.plus(2, ChronoUnit.HOURS);
+
+            String dataStreamName = "logs_my-app_prod";
+            var clusterState = DataStreamTestHelper.getClusterStateWithDataStream(
+                dataStreamName,
+                List.of(Tuple.tuple(start1, end1), Tuple.tuple(start2, end2))
+            );
+            DataStream dataStream = clusterState.getMetadata().dataStreams().get(dataStreamName);
+            assertThat(dataStream, notNullValue());
+            assertThat(dataStream.getIndices(), hasSize(2));
+            assertThat(
+                IndexSettings.TIME_SERIES_START_TIME.get(clusterState.getMetadata().index(dataStream.getIndices().get(0)).getSettings()),
+                equalTo(start1)
+            );
+            assertThat(
+                IndexSettings.TIME_SERIES_END_TIME.get(clusterState.getMetadata().index(dataStream.getIndices().get(0)).getSettings()),
+                equalTo(end1)
+            );
+            assertThat(
+                IndexSettings.TIME_SERIES_START_TIME.get(clusterState.getMetadata().index(dataStream.getIndices().get(1)).getSettings()),
+                equalTo(start2)
+            );
+            assertThat(
+                IndexSettings.TIME_SERIES_END_TIME.get(clusterState.getMetadata().index(dataStream.getIndices().get(1)).getSettings()),
+                equalTo(end2)
+            );
+
+            // Create a temporal gap between the two ranges; this is valid and shouldn't fail:
+            DataStreamTestHelper.getClusterStateWithDataStream(
+                dataStreamName,
+                List.of(Tuple.tuple(start1, end1.minus(1, ChronoUnit.MINUTES)), Tuple.tuple(start2.plus(1, ChronoUnit.MINUTES), end2))
+            );
+        }
+        {
+            // Invalid case:
+            Instant currentTime = Instant.now();
+
+            Instant start1 = currentTime.minus(6, ChronoUnit.HOURS);
+            Instant end1 = currentTime.minus(2, ChronoUnit.HOURS);
+            // Start2 is inside the start1 and end1 range:
+            Instant start2 = currentTime.minus(3, ChronoUnit.HOURS);
+            Instant end2 = currentTime.plus(2, ChronoUnit.HOURS);
+
+            String dataStreamName = "logs_my-app_prod";
+            var e = expectThrows(
+                IllegalArgumentException.class,
+                () -> DataStreamTestHelper.getClusterStateWithDataStream(
+                    dataStreamName,
+                    List.of(Tuple.tuple(start1, end1), Tuple.tuple(start2, end2))
+                )
+            );
+            var formatter = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER;
+            assertThat(
+                e.getMessage(),
+                equalTo(
+                    "backing index ["
+                        + DataStream.getDefaultBackingIndexName(dataStreamName, 1, start1.toEpochMilli())
+                        + "] with range ["
+                        + formatter.format(start1)
+                        + " TO "
+                        + formatter.format(end1)
+                        + "] is overlapping with backing index ["
+                        + DataStream.getDefaultBackingIndexName(dataStreamName, 2, start2.toEpochMilli())
+                        + "] with range ["
+                        + formatter.format(start2)
+                        + " TO "
+                        + formatter.format(end2)
+                        + "]"
+                )
+            );
+        }
+    }
 }

diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
index e6aefa60ded61..5fbd5f8b23543 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
@@ -2785,7 +2785,8 @@ public void testHiddenDataStreams() {
                         true,
                         false,
                         false,
-                        false
+                        false,
+                        null
                     )
                 )
             )
diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java
index 37e2cf71d3362..3bb851316ead8 100644
---
a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java
@@ -21,6 +21,7 @@
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexMode;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettingProvider;
 import org.elasticsearch.index.IndexSettingProviders;
@@ -89,7 +90,7 @@ public static DataStream newInstance(
         long generation,
         Map metadata
     ) {
-        return new DataStream(name, timeStampField, indices, generation, metadata, false, false, false, false);
+        return new DataStream(name, timeStampField, indices, generation, metadata, false, false, false, false, null);
     }
 
     public static String getLegacyDefaultBackingIndexName(
@@ -207,6 +208,7 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time
         if (randomBoolean()) {
             metadata = Map.of("key", "value");
         }
+
         return new DataStream(
             dataStreamName,
             createTimestampField("@timestamp"),
@@ -215,9 +217,10 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time
             metadata,
             randomBoolean(),
             randomBoolean(),
-            false,
+            false, // Some tests don't work well with system data streams, since these data streams require special handling
             timeProvider,
-            false
+            randomBoolean(),
+            randomBoolean() ? IndexMode.STANDARD : null // IndexMode.TIME_SERIES triggers validation that many unit tests don't pass
         );
     }
 
@@ -315,10 +318,17 @@ public static ClusterState getClusterStateWithDataStream(String dataStream, List
             backingIndices.add(im);
             generation++;
         }
-        DataStream ds = newInstance(
+        DataStream ds = new DataStream(
             dataStream,
             createTimestampField("@timestamp"),
-            backingIndices.stream().map(IndexMetadata::getIndex).collect(Collectors.toList())
+            backingIndices.stream().map(IndexMetadata::getIndex).collect(Collectors.toList()),
+            backingIndices.size(),
+            null,
+            false,
+            false,
+            false,
+            false,
+            IndexMode.TIME_SERIES
         );
         builder.put(ds);

diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java
index fc0c47dc42e92..1661585b5062f 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java
@@ -311,7 +311,8 @@ static DataStream updateLocalDataStream(Index backingIndexToFollow, DataStream l
                 remoteDataStream.isHidden(),
                 true,
                 remoteDataStream.isSystem(),
-                remoteDataStream.isAllowCustomRouting()
+                remoteDataStream.isAllowCustomRouting(),
+                remoteDataStream.getIndexMode()
             );
         } else {
             if (localDataStream.isReplicated() == false) {
@@ -342,7 +343,8 @@ static DataStream updateLocalDataStream(Index backingIndexToFollow, DataStream l
                 localDataStream.isHidden(),
                 localDataStream.isReplicated(),
                 localDataStream.isSystem(),
-                localDataStream.isAllowCustomRouting()
+                localDataStream.isAllowCustomRouting(),
+                localDataStream.getIndexMode()
             );
         }
     }
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java
index 2a9ef8a491e53..09d089901dbda 100644
---
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -2281,7 +2281,8 @@ private static ClusterState createRemoteClusterStateWithDataStream(String dataSt false, false, system, - false + false, + null ); ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("remote")) .metadata(Metadata.builder().put(indexMetadata, true).put(dataStream).version(0L)); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java index 6043080baee7a..590ffdeabc858 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java @@ -87,13 +87,25 @@ static DataStream generateDataSteam(String name, int numBackingIndices, boolean false, replicate, false, - false + false, + null ); } static DataStream generateDataSteam(String name, int generation, boolean replicate, String... backingIndexNames) { List backingIndices = Arrays.stream(backingIndexNames).map(value -> new Index(value, "uuid")).collect(Collectors.toList()); - return new DataStream(name, new TimestampField("@timestamp"), backingIndices, generation, Map.of(), false, replicate, false, false); + return new DataStream( + name, + new TimestampField("@timestamp"), + backingIndices, + generation, + Map.of(), + false, + replicate, + false, + false, + null + ); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java index 0887b34d6b110..c74199f434f58 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java @@ -137,7 +137,7 @@ public void testCalculateUsage() { null, null, null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) + new ComposableIndexTemplate.DataStreamTemplate(false, false, null) ) ) ) @@ -206,7 +206,7 @@ public void testCalculateUsage() { null, null, null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) + new ComposableIndexTemplate.DataStreamTemplate(false, false, null) ) ) ) diff --git a/x-pack/plugin/data-streams/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/datastreams/TsdbDataStreamRestIT.java b/x-pack/plugin/data-streams/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/datastreams/TsdbDataStreamRestIT.java index 78dc6f0170e9e..912411c56e72a 100644 --- a/x-pack/plugin/data-streams/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/datastreams/TsdbDataStreamRestIT.java +++ b/x-pack/plugin/data-streams/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/datastreams/TsdbDataStreamRestIT.java @@ -36,7 +36,6 @@ public class TsdbDataStreamRestIT extends ESRestTestCase { "index": { "number_of_replicas": 0, "number_of_shards": 2, - "mode": "time_series", "routing_path": ["metricset", "time_series_dimension"] } }, @@ -80,7 +79,9 @@ public class TsdbDataStreamRestIT extends ESRestTestCase { } } }, - "data_stream": {} + "data_stream": { + "index_mode": "time_series" + } }"""; private static final String DOC = """ diff --git 
a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 1f8e3188fcc2f..2d976798565b6 100644 --- a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -1137,7 +1137,7 @@ public void testIndexDocsWithCustomRoutingAllowed() throws Exception { null, null, null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) + new ComposableIndexTemplate.DataStreamTemplate(false, true, null) ); client().execute( PutComposableIndexTemplateAction.INSTANCE, @@ -1787,7 +1787,7 @@ public void testPartitionedTemplate() throws IOException { null, null, null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) + new ComposableIndexTemplate.DataStreamTemplate(false, true, null) ); ComposableIndexTemplate finalTemplate = template; client().execute( @@ -1813,7 +1813,7 @@ public void testPartitionedTemplate() throws IOException { null, null, null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) + new ComposableIndexTemplate.DataStreamTemplate(false, true, null) ); client().execute( PutComposableIndexTemplateAction.INSTANCE, @@ -1839,7 +1839,7 @@ public void testPartitionedTemplate() throws IOException { null, null, null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) + new ComposableIndexTemplate.DataStreamTemplate(false, false, null) ); ComposableIndexTemplate finalTemplate1 = template; Exception e = expectThrows( @@ -1881,7 +1881,7 @@ public void testSearchWithRouting() throws IOException, ExecutionException, Inte null, null, null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) + new ComposableIndexTemplate.DataStreamTemplate(false, true, null) ); client().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/x-pack/plugin/data-streams/src/main/java/org/elasticsearch/xpack/datastreams/DataStreamIndexSettingsProvider.java b/x-pack/plugin/data-streams/src/main/java/org/elasticsearch/xpack/datastreams/DataStreamIndexSettingsProvider.java index ae66b25f00802..670e504db3166 100644 --- a/x-pack/plugin/data-streams/src/main/java/org/elasticsearch/xpack/datastreams/DataStreamIndexSettingsProvider.java +++ b/x-pack/plugin/data-streams/src/main/java/org/elasticsearch/xpack/datastreams/DataStreamIndexSettingsProvider.java @@ -20,7 +20,6 @@ import java.time.Instant; import java.util.Locale; -import java.util.Optional; public class DataStreamIndexSettingsProvider implements IndexSettingProvider { @@ -30,41 +29,46 @@ public class DataStreamIndexSettingsProvider implements IndexSettingProvider { public Settings getAdditionalIndexSettings( String indexName, String dataStreamName, + IndexMode templateIndexMode, Metadata metadata, long resolvedAt, Settings allSettings ) { if (dataStreamName != null) { - IndexMode indexMode = Optional.ofNullable(allSettings.get(IndexSettings.MODE.getKey())) - .map(value -> IndexMode.valueOf(value.toUpperCase(Locale.ROOT))) - .orElse(IndexMode.STANDARD); - if (indexMode == IndexMode.TIME_SERIES) { - TimeValue lookAheadTime = allSettings.getAsTime( - IndexSettings.LOOK_AHEAD_TIME.getKey(), - IndexSettings.LOOK_AHEAD_TIME.getDefault(allSettings) - ); + DataStream dataStream = metadata.dataStreams().get(dataStreamName); + IndexMode indexMode; + if (dataStream != null) { + indexMode = dataStream.getIndexMode(); 
+ } else { + indexMode = templateIndexMode; + } + if (indexMode != null) { Settings.Builder builder = Settings.builder(); - DataStream dataStream = metadata.dataStreams().get(dataStreamName); - Instant start; - if (dataStream == null) { - start = Instant.ofEpochMilli(resolvedAt).minusMillis(lookAheadTime.getMillis()); - } else { - IndexMetadata currentLatestBackingIndex = metadata.index(dataStream.getWriteIndex()); - if (currentLatestBackingIndex.getSettings().hasValue(IndexSettings.TIME_SERIES_END_TIME.getKey()) == false) { - throw new IllegalStateException( - String.format( - Locale.ROOT, - "backing index [%s] in tsdb mode doesn't have the [%s] index setting", - currentLatestBackingIndex.getIndex().getName(), - IndexSettings.TIME_SERIES_START_TIME.getKey() - ) - ); + builder.put(IndexSettings.MODE.getKey(), indexMode); + + if (indexMode == IndexMode.TIME_SERIES) { + TimeValue lookAheadTime = IndexSettings.LOOK_AHEAD_TIME.get(allSettings); + Instant start; + if (dataStream == null) { + start = Instant.ofEpochMilli(resolvedAt).minusMillis(lookAheadTime.getMillis()); + } else { + IndexMetadata currentLatestBackingIndex = metadata.index(dataStream.getWriteIndex()); + if (currentLatestBackingIndex.getSettings().hasValue(IndexSettings.TIME_SERIES_END_TIME.getKey()) == false) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "backing index [%s] in tsdb mode doesn't have the [%s] index setting", + currentLatestBackingIndex.getIndex().getName(), + IndexSettings.TIME_SERIES_END_TIME.getKey() + ) + ); + } + start = IndexSettings.TIME_SERIES_END_TIME.get(currentLatestBackingIndex.getSettings()); } - start = IndexSettings.TIME_SERIES_END_TIME.get(currentLatestBackingIndex.getSettings()); + builder.put(IndexSettings.TIME_SERIES_START_TIME.getKey(), FORMATTER.format(start)); + Instant end = Instant.ofEpochMilli(resolvedAt).plusMillis(lookAheadTime.getMillis()); + builder.put(IndexSettings.TIME_SERIES_END_TIME.getKey(), FORMATTER.format(end)); } - builder.put(IndexSettings.TIME_SERIES_START_TIME.getKey(), FORMATTER.format(start)); - Instant end = Instant.ofEpochMilli(resolvedAt).plusMillis(lookAheadTime.getMillis()); - builder.put(IndexSettings.TIME_SERIES_END_TIME.getKey(), FORMATTER.format(end)); return builder.build(); } } diff --git a/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/DataStreamIndexSettingsProviderTests.java b/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/DataStreamIndexSettingsProviderTests.java index f6243049d4613..7cb683e00b85c 100644 --- a/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/DataStreamIndexSettingsProviderTests.java @@ -13,12 +13,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.List; +import java.util.Locale; import static org.elasticsearch.common.settings.Settings.builder; import static org.elasticsearch.xpack.datastreams.DataStreamIndexSettingsProvider.FORMATTER; @@ -32,15 +34,18 @@ public void testGetAdditionalIndexSettings() { Instant now = Instant.now().truncatedTo(ChronoUnit.MILLIS); TimeValue lookAheadTime = TimeValue.timeValueHours(2); // 
default - Settings settings = builder().put("index.mode", "time_series").build(); + Settings settings = Settings.EMPTY; var provider = new DataStreamIndexSettingsProvider(); Settings result = provider.getAdditionalIndexSettings( DataStream.getDefaultBackingIndexName(dataStreamName, 1), dataStreamName, + IndexMode.TIME_SERIES, metadata, now.toEpochMilli(), settings ); + assertThat(result.size(), equalTo(3)); + assertThat(result.get(IndexSettings.MODE.getKey()), equalTo(IndexMode.TIME_SERIES.name().toLowerCase(Locale.ROOT))); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); } @@ -56,59 +61,41 @@ public void testGetAdditionalIndexSettingsLookAheadTime() { Settings result = provider.getAdditionalIndexSettings( DataStream.getDefaultBackingIndexName(dataStreamName, 1), dataStreamName, + IndexMode.TIME_SERIES, metadata, now.toEpochMilli(), settings ); - assertThat(result.size(), equalTo(2)); + assertThat(result.size(), equalTo(3)); + assertThat(result.get(IndexSettings.MODE.getKey()), equalTo(IndexMode.TIME_SERIES.name().toLowerCase(Locale.ROOT))); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); } - public void testGetAdditionalIndexSettingsNoTimeSeries() { - Metadata metadata = Metadata.EMPTY_METADATA; - String dataStreamName = "logs-app1"; - - long now = Instant.now().toEpochMilli(); - Settings settings = randomBoolean() ? Settings.EMPTY : builder().put("index.mode", "standard").build(); - var provider = new DataStreamIndexSettingsProvider(); - Settings result = provider.getAdditionalIndexSettings( - DataStream.getDefaultBackingIndexName(dataStreamName, 1), - dataStreamName, - metadata, - now, - settings - ); - assertThat(result, equalTo(Settings.EMPTY)); - } - public void testGetAdditionalIndexSettingsDataStreamAlreadyCreated() { String dataStreamName = "logs-app1"; TimeValue lookAheadTime = TimeValue.timeValueHours(2); Instant sixHoursAgo = Instant.now().minus(6, ChronoUnit.HOURS).truncatedTo(ChronoUnit.MILLIS); Instant currentEnd = sixHoursAgo.plusMillis(lookAheadTime.getMillis()); - Metadata metadata = DataStreamTestHelper.getClusterStateWithDataStreams( - List.of(Tuple.tuple(dataStreamName, 1)), - List.of(), - sixHoursAgo.toEpochMilli(), - builder().put(IndexSettings.TIME_SERIES_START_TIME.getKey(), FORMATTER.format(sixHoursAgo)) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), FORMATTER.format(currentEnd)) - .build(), - 1 + Metadata metadata = DataStreamTestHelper.getClusterStateWithDataStream( + dataStreamName, + List.of(new Tuple<>(sixHoursAgo, currentEnd)) ).getMetadata(); Instant now = sixHoursAgo.plus(6, ChronoUnit.HOURS); - Settings settings = builder().put("index.mode", "time_series").build(); + Settings settings = Settings.EMPTY; var provider = new DataStreamIndexSettingsProvider(); var result = provider.getAdditionalIndexSettings( DataStream.getDefaultBackingIndexName(dataStreamName, 1), dataStreamName, + IndexMode.TIME_SERIES, metadata, now.toEpochMilli(), settings ); - assertThat(result.size(), equalTo(2)); + assertThat(result.size(), equalTo(3)); + assertThat(result.get(IndexSettings.MODE.getKey()), equalTo(IndexMode.TIME_SERIES.name().toLowerCase(Locale.ROOT))); 
assertThat(result.get(IndexSettings.TIME_SERIES_START_TIME.getKey()), equalTo(FORMATTER.format(currentEnd))); assertThat( result.get(IndexSettings.TIME_SERIES_END_TIME.getKey()), @@ -119,22 +106,41 @@ public void testGetAdditionalIndexSettingsDataStreamAlreadyCreated() { public void testGetAdditionalIndexSettingsDataStreamAlreadyCreatedTimeSettingsMissing() { String dataStreamName = "logs-app1"; Instant twoHoursAgo = Instant.now().minus(4, ChronoUnit.HOURS).truncatedTo(ChronoUnit.MILLIS); - Metadata metadata = DataStreamTestHelper.getClusterStateWithDataStreams( - List.of(Tuple.tuple(dataStreamName, 1)), - List.of(), - twoHoursAgo.toEpochMilli(), - builder().build(), - 1 - ).getMetadata(); + Metadata.Builder mb = Metadata.builder( + DataStreamTestHelper.getClusterStateWithDataStreams( + List.of(Tuple.tuple(dataStreamName, 1)), + List.of(), + twoHoursAgo.toEpochMilli(), + builder().build(), + 1 + ).getMetadata() + ); + DataStream ds = mb.dataStream(dataStreamName); + mb.put( + new DataStream( + ds.getName(), + ds.getTimeStampField(), + ds.getIndices(), + ds.getGeneration(), + ds.getMetadata(), + ds.isHidden(), + ds.isReplicated(), + ds.isSystem(), + ds.isAllowCustomRouting(), + IndexMode.TIME_SERIES + ) + ); + Metadata metadata = mb.build(); Instant now = twoHoursAgo.plus(2, ChronoUnit.HOURS); - Settings settings = builder().put("index.mode", "time_series").build(); + Settings settings = Settings.EMPTY; var provider = new DataStreamIndexSettingsProvider(); Exception e = expectThrows( IllegalStateException.class, () -> provider.getAdditionalIndexSettings( DataStream.getDefaultBackingIndexName(dataStreamName, 1), dataStreamName, + IndexMode.TIME_SERIES, metadata, now.toEpochMilli(), settings @@ -143,11 +149,46 @@ public void testGetAdditionalIndexSettingsDataStreamAlreadyCreatedTimeSettingsMi assertThat( e.getMessage(), equalTo( - "backing index [%s] in tsdb mode doesn't have the [index.time_series.start_time] index setting".formatted( + "backing index [%s] in tsdb mode doesn't have the [index.time_series.end_time] index setting".formatted( DataStream.getDefaultBackingIndexName(dataStreamName, 1, twoHoursAgo.toEpochMilli()) ) ) ); } + public void testGetAdditionalIndexSettingsIndexModeNotSpecified() { + Metadata metadata = Metadata.EMPTY_METADATA; + String dataStreamName = "logs-app1"; + + Settings settings = Settings.EMPTY; + var provider = new DataStreamIndexSettingsProvider(); + Settings result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 1), + dataStreamName, + null, + metadata, + 1L, + settings + ); + assertThat(result.size(), equalTo(0)); + } + + public void testGetAdditionalIndexSettingsIndexModeStandardSpecified() { + Metadata metadata = Metadata.EMPTY_METADATA; + String dataStreamName = "logs-app1"; + + Settings settings = Settings.EMPTY; + var provider = new DataStreamIndexSettingsProvider(); + Settings result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 1), + dataStreamName, + IndexMode.STANDARD, + metadata, + 1L, + settings + ); + assertThat(result.size(), equalTo(1)); + assertThat(result.get(IndexSettings.MODE.getKey()), equalTo(IndexMode.STANDARD.name().toLowerCase(Locale.ROOT))); + } + } diff --git a/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/DataStreamsStatsTests.java b/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/DataStreamsStatsTests.java index b101d2f692708..d0f01d0d7906e 100644 --- 
a/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/DataStreamsStatsTests.java +++ b/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/DataStreamsStatsTests.java @@ -240,7 +240,7 @@ private String createDataStream(boolean hidden) throws Exception { null, null, null, - new ComposableIndexTemplate.DataStreamTemplate(hidden, false), + new ComposableIndexTemplate.DataStreamTemplate(hidden, false, null), null ); assertTrue( diff --git a/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/MetadataDataStreamRolloverServiceTests.java b/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/MetadataDataStreamRolloverServiceTests.java index 9b259c79af71c..641b77a88a42c 100644 --- a/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/MetadataDataStreamRolloverServiceTests.java +++ b/x-pack/plugin/data-streams/src/test/java/org/elasticsearch/xpack/datastreams/MetadataDataStreamRolloverServiceTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -49,14 +50,21 @@ public class MetadataDataStreamRolloverServiceTests extends ESTestCase { public void testRolloverClusterStateForDataStream() throws Exception { Instant now = Instant.now(); String dataStreamName = "logs-my-app"; - final DataStream dataStream = DataStreamTestHelper.newInstance( + final DataStream dataStream = new DataStream( dataStreamName, new DataStream.TimestampField("@timestamp"), - List.of(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1, now.toEpochMilli()), "uuid")) + List.of(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1, now.toEpochMilli()), "uuid")), + 1, + null, + false, + false, + false, + false, + IndexMode.TIME_SERIES ); ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) .template(new Template(Settings.builder().put("index.mode", "time_series").build(), null, null)) - .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, IndexMode.TIME_SERIES)) .build(); Metadata.Builder builder = Metadata.builder(); builder.put("template", template); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java index 1ba9c4a4efa84..7e809c6d5e4a4 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java @@ -364,7 +364,8 @@ public ClusterState execute(ClusterState currentState) { originalDataStream.isHidden(), originalDataStream.isReplicated(), originalDataStream.isSystem(), - originalDataStream.isAllowCustomRouting() + originalDataStream.isAllowCustomRouting(), + originalDataStream.getIndexMode() ); metadataBuilder.put(dataStream); } From b42ba6401905468772cf274287d5aec617c23806 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 27 Jan 2022 12:03:58 +0100 Subject: [PATCH 067/100] [DOCS] 
Fixes geo function field names. (#83198) --- .../functions/ml-geo-functions.asciidoc | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc index 362f697ad3956..5c061daa1cd44 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc @@ -35,8 +35,8 @@ PUT _ml/anomaly_detectors/example1 "analysis_config": { "detectors": [{ "function" : "lat_long", - "field_name" : "transactionCoordinates", - "by_field_name" : "creditCardNumber" + "field_name" : "transaction_coordinates", + "by_field_name" : "credit_card_number" }] }, "data_description": { @@ -49,13 +49,14 @@ PUT _ml/anomaly_detectors/example1 If you use this `lat_long` function in a detector in your {anomaly-job}, it detects anomalies where the geographic location of a credit card transaction is -unusual for a particular customer’s credit card. An anomaly might indicate fraud. +unusual for a particular customer’s credit card. An anomaly might indicate +fraud. -IMPORTANT: The `field_name` that you supply must be a single string that contains -two comma-separated numbers of the form `latitude,longitude`, a `geo_point` field, -a `geo_shape` field that contains point values, or a `geo_centroid` aggregation. -The `latitude` and `longitude` must be in the range -180 to 180 and represent a -point on the surface of the Earth. +IMPORTANT: The `field_name` that you supply must be a single string that +contains two comma-separated numbers of the form `latitude,longitude`, a +`geo_point` field, a `geo_shape` field that contains point values, or a +`geo_centroid` aggregation. The `latitude` and `longitude` must be in the range +-180 to 180 and represent a point on the surface of the Earth. For example, JSON data might contain the following transaction coordinates: @@ -63,8 +64,8 @@ For example, JSON data might contain the following transaction coordinates: -------------------------------------------------- { "time": 1460464275, - "transactionCoordinates": "40.7,-74.0", - "creditCardNumber": "1234123412341234" + "transaction_coordinates": "40.7,-74.0", + "credit_card_number": "1234123412341234" } -------------------------------------------------- // NOTCONSOLE From ac9f30a73c7382be223185949d7377843f468e8b Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 27 Jan 2022 13:42:54 +0100 Subject: [PATCH 068/100] Provide access to _type in 5.x indices (#83195) Allows running queries against _type on 5.x indices as well as returning _type in search results. 
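For example, once such an index is restored or mounted, documents of a given legacy type can be matched with a plain term query. A minimal sketch (the index name "restored_test" and type name "doc0" here are illustrative):

GET /restored_test/_search
{
  "query": {
    "term": {
      "_type": "doc0"
    }
  }
}

Matching hits also carry the legacy type in the _type document field. Note that this query runs against doc values rather than index structures, so it can be slower than a term query on an indexed field.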
Relates #81210 --- .../index/fieldvisitor/FieldsVisitor.java | 8 +- .../index/mapper/LegacyTypeFieldMapper.java | 98 +++++++++++++++++++ .../index/mapper/MapperRegistry.java | 9 +- .../oldrepos/OldRepositoryAccessIT.java | 49 ++++++++-- 4 files changed, 151 insertions(+), 13 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapper.java diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java index 1556410ca0c2c..787d9ba7c7a6e 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.LegacyTypeFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; @@ -68,7 +69,9 @@ public Status needsField(FieldInfo fieldInfo) { } // support _uid for loading older indices if ("_uid".equals(fieldInfo.name)) { - return Status.YES; + if (requiredFields.remove(IdFieldMapper.NAME) || requiredFields.remove(LegacyTypeFieldMapper.NAME)) { + return Status.YES; + } } // All these fields are single-valued so we can stop when the set is // empty @@ -111,8 +114,9 @@ public void stringField(FieldInfo fieldInfo, String value) { if ("_uid".equals(fieldInfo.name)) { // 5.x-only int delimiterIndex = value.indexOf('#'); // type is not allowed to have # in it..., ids can - // type = value.substring(0, delimiterIndex); + String type = value.substring(0, delimiterIndex); id = value.substring(delimiterIndex + 1); + addValue(LegacyTypeFieldMapper.NAME, type); } else if (IdFieldMapper.NAME.equals(fieldInfo.name)) { // only applies to 5.x indices that have single_type = true id = value; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapper.java new file mode 100644 index 0000000000000..a5e0ec86db775 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapper.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.sandbox.search.DocValuesTermsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.index.query.SearchExecutionContext; + +import java.util.Collection; +import java.util.Collections; + +/** + * Field mapper to access the legacy _type that existed in Elasticsearch 5 + */ +public class LegacyTypeFieldMapper extends MetadataFieldMapper { + + public static final String NAME = "_type"; + + public static final String CONTENT_TYPE = "_type"; + + private static final LegacyTypeFieldMapper INSTANCE = new LegacyTypeFieldMapper(); + + public static final TypeParser PARSER = new FixedTypeParser(c -> INSTANCE); + + protected LegacyTypeFieldMapper() { + super(new LegacyTypeFieldType(), Lucene.KEYWORD_ANALYZER); + } + + static final class LegacyTypeFieldType extends TermBasedFieldType { + + LegacyTypeFieldType() { + super(NAME, false, true, true, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public boolean isSearchable() { + // The _type field is always searchable. + return true; + } + + @Override + public Query termQuery(Object value, SearchExecutionContext context) { + return SortedSetDocValuesField.newSlowExactQuery(name(), indexedValueForSearch(value)); + } + + @Override + public Query termsQuery(Collection values, SearchExecutionContext context) { + BytesRef[] bytesRefs = values.stream().map(this::indexedValueForSearch).toArray(BytesRef[]::new); + return new DocValuesTermsQuery(name(), bytesRefs); + } + + @Override + public Query rangeQuery( + Object lowerTerm, + Object upperTerm, + boolean includeLower, + boolean includeUpper, + SearchExecutionContext context + ) { + return SortedSetDocValuesField.newSlowRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? 
null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper + ); + } + + @Override + public boolean mayExistInIndex(SearchExecutionContext context) { + return true; + } + + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return new StoredValueFetcher(context.lookup(), NAME); + } + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java index 0ed5e7682a365..034f056dda993 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java @@ -26,6 +26,7 @@ public final class MapperRegistry { private final Map runtimeFieldParsers; private final Map metadataMapperParsers; private final Map metadataMapperParsers7x; + private final Map metadataMapperParsers5x; private final Function> fieldFilter; public MapperRegistry( @@ -40,6 +41,9 @@ public MapperRegistry( Map metadata7x = new LinkedHashMap<>(metadataMapperParsers); metadata7x.remove(NestedPathFieldMapper.NAME); this.metadataMapperParsers7x = metadata7x; + Map metadata5x = new LinkedHashMap<>(metadata7x); + metadata5x.put(LegacyTypeFieldMapper.NAME, LegacyTypeFieldMapper.PARSER); + this.metadataMapperParsers5x = metadata5x; this.fieldFilter = fieldFilter; } @@ -62,8 +66,11 @@ public Map getRuntimeFieldParsers() { public Map getMetadataMapperParsers(Version indexCreatedVersion) { if (indexCreatedVersion.onOrAfter(Version.V_8_0_0)) { return metadataMapperParsers; + } else if (indexCreatedVersion.major < 6) { + return metadataMapperParsers5x; + } else { + return metadataMapperParsers7x; } - return metadataMapperParsers7x; } /** diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java index 5b3fb0a331367..b8ab56bf69400 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -136,7 +137,7 @@ && randomBoolean()) { String id = "testdoc" + i; expectedIds.add(id); // use multiple types for ES versions < 6.0.0 - String type = "doc" + (oldVersion.before(Version.fromString("6.0.0")) ? Murmur3HashFunction.hash(id) % 2 : 0); + String type = getType(oldVersion, id); Request doc = new Request("PUT", "/test/" + type + "/" + id); doc.addParameter("refresh", "true"); doc.setJsonEntity(sourceForDoc(i)); @@ -146,7 +147,7 @@ && randomBoolean()) { for (int i = 0; i < extraDocs; i++) { String id = randomFrom(expectedIds); expectedIds.remove(id); - String type = "doc" + (oldVersion.before(Version.fromString("6.0.0")) ? 
Murmur3HashFunction.hash(id) % 2 : 0); + String type = getType(oldVersion, id); Request doc = new Request("DELETE", "/test/" + type + "/" + id); doc.addParameter("refresh", "true"); oldEs.performRequest(doc); @@ -267,6 +268,10 @@ && randomBoolean()) { } } + private String getType(Version oldVersion, String id) { + return "doc" + (oldVersion.before(Version.fromString("6.0.0")) ? Math.abs(Murmur3HashFunction.hash(id) % 2) : 0); + } + private static String sourceForDoc(int i) { return "{\"test\":\"test" + i + "\",\"val\":" + i + "}"; } @@ -337,7 +342,7 @@ private void restoreMountAndVerify( } // run a search against the index - assertDocs("restored_test", numDocs, expectedIds, client, sourceOnlyRepository); + assertDocs("restored_test", numDocs, expectedIds, client, sourceOnlyRepository, oldVersion); // mount as full copy searchable snapshot RestoreSnapshotResponse mountSnapshotResponse = client.searchableSnapshots() @@ -363,7 +368,7 @@ private void restoreMountAndVerify( ); // run a search against the index - assertDocs("mounted_full_copy_test", numDocs, expectedIds, client, sourceOnlyRepository); + assertDocs("mounted_full_copy_test", numDocs, expectedIds, client, sourceOnlyRepository, oldVersion); // mount as shared cache searchable snapshot mountSnapshotResponse = client.searchableSnapshots() @@ -378,12 +383,18 @@ private void restoreMountAndVerify( assertEquals(numberOfShards, mountSnapshotResponse.getRestoreInfo().successfulShards()); // run a search against the index - assertDocs("mounted_shared_cache_test", numDocs, expectedIds, client, sourceOnlyRepository); + assertDocs("mounted_shared_cache_test", numDocs, expectedIds, client, sourceOnlyRepository, oldVersion); } @SuppressWarnings("removal") - private void assertDocs(String index, int numDocs, Set expectedIds, RestHighLevelClient client, boolean sourceOnlyRepository) - throws IOException { + private void assertDocs( + String index, + int numDocs, + Set expectedIds, + RestHighLevelClient client, + boolean sourceOnlyRepository, + Version oldVersion + ) throws IOException { // run a search against the index SearchResponse searchResponse = client.search(new SearchRequest(index), RequestOptions.DEFAULT); logger.info(searchResponse); @@ -420,9 +431,9 @@ private void assertDocs(String index, int numDocs, Set expectedIds, Rest // check that doc values can be accessed by (reverse) sorting on numeric val field // first add mapping for field (this will be done automatically in the future) XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); - mappingBuilder.startObject().startObject("properties").startObject("val"); - mappingBuilder.field("type", "long"); - mappingBuilder.endObject().endObject().endObject(); + mappingBuilder.startObject().startObject("properties"); + mappingBuilder.startObject("val").field("type", "long").endObject(); + mappingBuilder.endObject().endObject(); assertTrue( client.indices().putMapping(new PutMappingRequest(index).source(mappingBuilder), RequestOptions.DEFAULT).isAcknowledged() ); @@ -442,6 +453,24 @@ private void assertDocs(String index, int numDocs, Set expectedIds, Rest expectedIds.stream().sorted(Comparator.comparingInt(this::getIdAsNumeric).reversed()).collect(Collectors.toList()), Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getId).collect(Collectors.toList()) ); + + if (oldVersion.before(Version.fromString("6.0.0"))) { + // search on _type and check that results contain _type information + String randomType = getType(oldVersion, randomFrom(expectedIds)); + long typeCount = 
expectedIds.stream().filter(idd -> getType(oldVersion, idd).equals(randomType)).count(); + searchResponse = client.search( + new SearchRequest(index).source(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("_type", randomType))), + RequestOptions.DEFAULT + ); + logger.info(searchResponse); + assertEquals(typeCount, searchResponse.getHits().getTotalHits().value); + for (SearchHit hit : searchResponse.getHits().getHits()) { + DocumentField typeField = hit.field("_type"); + assertNotNull(typeField); + assertThat(typeField.getValue(), instanceOf(String.class)); + assertEquals(randomType, typeField.getValue()); + } + } } } From 836d9bc7861adbe13eb063176048919bf35ad4e2 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Thu, 27 Jan 2022 08:27:35 -0500 Subject: [PATCH 069/100] [DOCS] Add redirect for 'Java client and security' page (#83180) Adds a redirect for the [Java client and security](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/java-clients.html) page, which has been removed in 8.0+. We saw this missing page create several broken links in https://github.com/elastic/docs/pull/2312. I've opened separate PRs to update those links. This adds a redirect for anyone who otherwise lands on the page. --- docs/reference/redirects.asciidoc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 04b06d31c9c1a..87011f45516ed 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -3,6 +3,15 @@ The following pages have moved or been deleted. +[role="exclude",id="java-clients"] +=== Java transport client and security + +The Java transport client has been removed. Use the +https://www.elastic.co/guide/en/elasticsearch/client/java-rest/7.15/java-rest-high.html[Java +high-level REST client] instead. For migration steps, refer to the +https://www.elastic.co/guide/en/elasticsearch/client/java-rest/7.15/java-rest-high-level-migration.html[migration +guide]. 
+ // [START] Snapshot and restore [role="exclude",id="snapshot-lifecycle-management"] From 4ad7814a76335649bad1777dde345708778ca52f Mon Sep 17 00:00:00 2001 From: Dan Hermann Date: Thu, 27 Jan 2022 07:44:52 -0600 Subject: [PATCH 070/100] Expose 'features' option in Get Index API (#83083) --- docs/changelog/83083.yaml | 6 ++ .../rest-api-spec/api/indices.get.json | 10 +++ .../test/indices.get/10_basic.yml | 15 ++++ .../admin/indices/get/GetIndexRequest.java | 32 +++++++- .../admin/indices/RestGetIndicesAction.java | 1 + .../indices/get/GetIndexRequestTests.java | 77 +++++++++++++++++++ .../elasticsearch/rest/RestRequestTests.java | 4 +- 7 files changed, 142 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/83083.yaml create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java diff --git a/docs/changelog/83083.yaml b/docs/changelog/83083.yaml new file mode 100644 index 0000000000000..c0fed25e8ce72 --- /dev/null +++ b/docs/changelog/83083.yaml @@ -0,0 +1,6 @@ +pr: 83083 +summary: Expose 'features' option in Get Index API +area: Indices APIs +type: enhancement +issues: + - 82948 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json index fb4dee07234cc..e0cd96e346a7b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json @@ -50,6 +50,16 @@ "default":"open", "description":"Whether wildcard expressions should get expanded to open or closed indices (default: open)" }, + "features":{ + "type":"enum", + "options":[ + "aliases", + "mappings", + "settings" + ], + "default":"aliases,mappings,settings", + "description":"Return only information on specified index features" + }, "flat_settings":{ "type":"boolean", "description":"Return settings in flat format (default: false)" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get/10_basic.yml index eb30f08abcaf1..c64b872c3e8ad 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get/10_basic.yml @@ -169,3 +169,18 @@ setup: catch: bad_request indices.get: index: _foo + +--- +"Should return only selected features": + - skip: + version: " - 8.0.99" + reason: "features option added in 8.1.0" + + - do: + indices.get: + index: test_index + features: aliases,settings + + - is_true: test_index.aliases + - is_true: test_index.settings + - match: { test_index.mappings: {}} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index a493647e58851..8c821e90d9373 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -13,8 +13,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.rest.RestRequest; import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; /** * A request to retrieve 
information about an index. @@ -50,9 +56,33 @@ public static Feature fromId(byte id) { } return FEATURES[id]; } + + public static Feature[] fromRequest(RestRequest request) { + if (request.hasParam("features")) { + String[] featureNames = request.param("features").split(","); + Set features = new HashSet<>(); + List invalidFeatures = new ArrayList<>(); + for (int k = 0; k < featureNames.length; k++) { + try { + features.add(Feature.valueOf(featureNames[k].toUpperCase(Locale.ROOT))); + } catch (IllegalArgumentException e) { + invalidFeatures.add(featureNames[k]); + } + } + if (invalidFeatures.size() > 0) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "Invalid features specified [%s]", String.join(",", invalidFeatures)) + ); + } else { + return features.toArray(Feature[]::new); + } + } else { + return DEFAULT_FEATURES; + } + } } - private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS }; + static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS }; private Feature[] features = DEFAULT_FEATURES; private boolean humanReadable = false; private transient boolean includeDefaults = false; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java index 3e55b84a3cbbc..6bd1e35787ba5 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java @@ -69,6 +69,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexRequest.masterNodeTimeout())); getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); getIndexRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); + getIndexRequest.features(GetIndexRequest.Feature.fromRequest(request)); return channel -> client.admin().indices().getIndex(getIndexRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java new file mode 100644 index 0000000000000..934711dfa7ec6 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.get; + +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestRequestTests; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class GetIndexRequestTests extends ESTestCase { + + public void testFeaturesFromRequest() { + int numFeatures = randomIntBetween(1, GetIndexRequest.DEFAULT_FEATURES.length); + List featureNames = new ArrayList<>(); + List expectedFeatures = new ArrayList<>(); + for (int k = 0; k < numFeatures; k++) { + GetIndexRequest.Feature feature = randomValueOtherThanMany( + f -> featureNames.contains(f.name()), + () -> randomFrom(GetIndexRequest.DEFAULT_FEATURES) + ); + featureNames.add(feature.name()); + expectedFeatures.add(feature); + } + + RestRequest request = RestRequestTests.contentRestRequest("", Map.of("features", String.join(",", featureNames))); + GetIndexRequest.Feature[] featureArray = GetIndexRequest.Feature.fromRequest(request); + assertThat(featureArray, arrayContainingInAnyOrder(expectedFeatures.toArray(GetIndexRequest.Feature[]::new))); + } + + public void testDuplicateFeatures() { + int numFeatures = randomIntBetween(1, 5); + GetIndexRequest.Feature feature = randomFrom(GetIndexRequest.DEFAULT_FEATURES); + List featureList = new ArrayList<>(); + for (int k = 0; k < numFeatures; k++) { + featureList.add(feature.name()); + } + RestRequest request = RestRequestTests.contentRestRequest("", Map.of("features", String.join(",", featureList))); + GetIndexRequest.Feature[] features = GetIndexRequest.Feature.fromRequest(request); + assertThat(features.length, equalTo(1)); + assertThat(features[0], equalTo(feature)); + } + + public void testMissingFeatures() { + RestRequest request = RestRequestTests.contentRestRequest("", Map.of()); + GetIndexRequest.Feature[] features = GetIndexRequest.Feature.fromRequest(request); + assertThat(features, arrayContainingInAnyOrder(GetIndexRequest.DEFAULT_FEATURES)); + } + + public void testInvalidFeatures() { + int numFeatures = randomIntBetween(1, 4); + List invalidFeatures = new ArrayList<>(); + for (int k = 0; k < numFeatures; k++) { + invalidFeatures.add(randomAlphaOfLength(5)); + } + + RestRequest request = RestRequestTests.contentRestRequest("", Map.of("features", String.join(",", invalidFeatures))); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> GetIndexRequest.Feature.fromRequest(request)); + assertThat( + e.getMessage(), + containsString(String.format(Locale.ROOT, "Invalid features specified [%s]", String.join(",", invalidFeatures))) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java index 8913034cd626a..d2799efef0ec7 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java @@ -236,7 +236,7 @@ public void testRequiredContent() { assertEquals("unknown content type", e.getMessage()); } - private static RestRequest contentRestRequest(String content, Map params) { + public static RestRequest contentRestRequest(String content, Map params) { Map> headers = new HashMap<>(); headers.put("Content-Type", Collections.singletonList("application/json")); return 
contentRestRequest(content, params, headers); @@ -250,7 +250,7 @@ private static RestRequest contentRestRequest(String content, Map Date: Thu, 27 Jan 2022 15:22:49 +0100 Subject: [PATCH 071/100] Introduce flag for testing CCS compatibility (#81809) CCS works with a subset of APIs and features depending on the versions of the clusters being communicated with. Currently we limit this CCS compliance to one minor version backward and one minor forward. This change adds a setting, useful for testing in clients like Kibana, that can be turned on to check whether a search request sent to one of the endpoints that support CCS is compatible with a cluster that is one minor version back. We do this by trying to serialize the request to a stream with the earlier version. Features and components that are not supported in that version should throw errors upon attempted serialization to indicate they are not compatible. In addition, components extending NamedWriteable (e.g. new queries) need to throw an error when they are written to a stream whose version predates the version in which they were released. --- .../mustache/MultiSearchTemplateIT.java | 42 ++++++- .../script/mustache/SearchTemplateIT.java | 35 +++++- .../query/plugin/CustomQueryParserIT.java | 2 + .../search/msearch/MultiSearchIT.java | 40 +++++++ .../indices/resolve/ResolveIndexAction.java | 8 ++ .../TransportFieldCapabilitiesAction.java | 8 ++ .../action/search/TransportSearchAction.java | 6 + .../action/search/TransportSearchHelper.java | 51 ++++++++- .../stream/VersionCheckingStreamOutput.java | 81 ++++++++++++++ .../common/settings/ClusterSettings.java | 1 + .../index/query/QueryBuilder.java | 10 +- .../search/SearchExtBuilder.java | 12 +- .../elasticsearch/search/SearchService.java | 6 + .../aggregations/AggregationBuilder.java | 10 +- .../PipelineAggregationBuilder.java | 10 +- .../search/rescore/QueryRescorerBuilder.java | 6 + .../search/rescore/RescorerBuilder.java | 4 +- .../search/sort/FieldSortBuilder.java | 5 + .../search/sort/GeoDistanceSortBuilder.java | 5 + .../search/sort/ScoreSortBuilder.java | 6 + .../search/sort/ScriptSortBuilder.java | 5 + .../search/sort/SortBuilder.java | 8 +- .../search/suggest/SuggestionBuilder.java | 4 +- .../CompletionSuggestionBuilder.java | 6 + .../phrase/PhraseSuggestionBuilder.java | 6 + .../suggest/term/TermSuggestionBuilder.java | 6 + .../TransportResolveIndexActionTests.java | 94 ++++++++++++++++ ...TransportFieldCapabilitiesActionTests.java | 103 ++++++++++++++++++ .../search/TransportSearchActionTests.java | 81 ++++++++++++++ .../search/TransportSearchHelperTests.java | 20 ++++ .../VersionCheckingStreamOutputTests.java | 54 +++++++++ .../query/SpanMultiTermQueryBuilderTests.java | 6 + .../search/RestMultiSearchActionTests.java | 6 - .../search/SearchModuleTests.java | 11 ++ .../search}/DummyQueryBuilder.java | 4 +- .../search}/DummyQueryParserPlugin.java | 13 ++- .../FailBeforeCurrentVersionQueryBuilder.java | 54 +++++++++ .../aggregations/AggregatorTestCase.java | 5 + .../xpack/search/AsyncSearchActionIT.java | 26 +++++ .../action/TransportTermsEnumAction.java | 6 + .../TransportTermsEnumActionTests.java | 42 +++++++ 41 files changed, 878 insertions(+), 30 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java create mode 100644
server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java create mode 100644 server/src/test/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutputTests.java rename {server/src/test/java/org/elasticsearch/index/query/plugin => test/framework/src/main/java/org/elasticsearch/search}/DummyQueryBuilder.java (93%) rename {server/src/test/java/org/elasticsearch/index/query/plugin => test/framework/src/main/java/org/elasticsearch/search}/DummyQueryParserPlugin.java (80%) create mode 100644 test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index 35177c743f400..302016a19dfde 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -8,23 +8,30 @@ package org.elasticsearch.script.mustache; +import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.mustache.MultiSearchTemplateResponse.Item; +import org.elasticsearch.search.DummyQueryParserPlugin; +import org.elasticsearch.search.SearchService; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; @@ -34,7 +41,15 @@ public class MultiSearchTemplateIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singleton(MustachePlugin.class); + return List.of(MustachePlugin.class, DummyQueryParserPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true") + .build(); } public void testBasic() throws Exception { @@ -164,4 +179,27 @@ public void testBasic() throws Exception { assertThat(searchTemplateResponse5.hasResponse(), is(false)); assertThat(searchTemplateResponse5.getSource().utf8ToString(), equalTo("{\"query\":{\"terms\":{\"group\":[1,2,3,]}}}")); } + + /** + * Test that triggering the CCS compatibility check with a query that shouldn't go to the minor before Version.CURRENT works + */ + public void testCCSCheckCompatibility() throws Exception { + String templateString = """ + { + "source": "{ \\"query\\":{\\"fail_before_current_version\\":{}} }" + }"""; + 
SearchTemplateRequest searchTemplateRequest = SearchTemplateRequest.fromXContent( + createParser(JsonXContent.jsonXContent, templateString) + ); + searchTemplateRequest.setRequest(new SearchRequest()); + MultiSearchTemplateRequest request = new MultiSearchTemplateRequest(); + request.add(searchTemplateRequest); + MultiSearchTemplateResponse multiSearchTemplateResponse = client().execute(MultiSearchTemplateAction.INSTANCE, request).get(); + Item response = multiSearchTemplateResponse.getResponses()[0]; + assertTrue(response.isFailure()); + Exception ex = response.getFailure(); + assertThat(ex.getMessage(), containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version")); + assertThat(ex.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); + assertEquals("This query isn't serializable to nodes before " + Version.CURRENT, ex.getCause().getMessage()); + } } diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index 751f933f5d775..7d9684334e702 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -8,12 +8,16 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.DummyQueryParserPlugin; +import org.elasticsearch.search.SearchService; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -23,7 +27,9 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -38,7 +44,12 @@ public class SearchTemplateIT extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singleton(MustachePlugin.class); + return List.of(MustachePlugin.class, DummyQueryParserPlugin.class); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true").build(); } @Before @@ -346,4 +357,26 @@ public void testIndexedTemplateWithArray() throws Exception { assertHitCount(searchResponse.getResponse(), 5); } + /** + * Test that triggering the CCS compatibility check with a query that shouldn't go to the minor before Version.CURRENT works + */ + public void testCCSCheckCompatibility() throws Exception { + String templateString = """ + { + "source": "{ \\"query\\":{\\"fail_before_current_version\\":{}} }" + }"""; + SearchTemplateRequest request = 
SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString)); + request.setRequest(new SearchRequest()); + ExecutionException ex = expectThrows( + ExecutionException.class, + () -> client().execute(SearchTemplateAction.INSTANCE, request).get() + ); + assertThat( + ex.getCause().getMessage(), + containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version") + ); + assertThat(ex.getCause().getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); + assertEquals("This query isn't serializable to nodes before " + Version.CURRENT, ex.getCause().getCause().getMessage()); + } + } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java index 182effc3626c1..2aee517aa4531 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java @@ -10,6 +10,8 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.DummyQueryBuilder; +import org.elasticsearch.search.DummyQueryParserPlugin; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Before; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java index 4de2f53423061..ecca33f08d9da 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java @@ -8,9 +8,13 @@ package org.elasticsearch.search.msearch; +import org.elasticsearch.Version; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.DummyQueryBuilder; +import org.elasticsearch.search.SearchService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentType; @@ -22,6 +26,14 @@ public class MultiSearchIT extends ESIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true") + .build(); + } + public void testSimpleMultiSearch() { createIndex("test"); ensureGreen(); @@ -70,4 +82,32 @@ public void testSimpleMultiSearchMoreRequests() { } } + /** + * Test that triggering the CCS compatibility check with a query that shouldn't go to the minor before Version.CURRENT works + */ + public void testCCSCheckCompatibility() throws Exception { + createIndex("test"); + ensureGreen(); + client().prepareIndex("test").setId("1").setSource("field", "xxx").get(); + client().prepareIndex("test").setId("2").setSource("field", "yyy").get(); + refresh(); + MultiSearchResponse response = client().prepareMultiSearch() + .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx"))) + .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy"))) + .add(client().prepareSearch("test").setQuery(new 
DummyQueryBuilder() { + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + })) + .get(); + + assertThat(response.getResponses().length, equalTo(3)); + assertHitCount(response.getResponses()[0].getResponse(), 1L); + assertHitCount(response.getResponses()[1].getResponse(), 1L); + assertTrue(response.getResponses()[2].isFailure()); + assertTrue( + response.getResponses()[2].getFailure().getMessage().contains("the 'search.check_ccs_compatibility' setting is enabled") + ); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index 1f4ef2b7a7c87..358ba10f2a74e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; +import org.elasticsearch.search.SearchService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; @@ -54,6 +55,8 @@ import java.util.SortedMap; import java.util.TreeMap; +import static org.elasticsearch.action.search.TransportSearchHelper.checkCCSVersionCompatibility; + public class ResolveIndexAction extends ActionType { public static final ResolveIndexAction INSTANCE = new ResolveIndexAction(); @@ -436,6 +439,7 @@ public static class TransportAction extends HandledTransportAction listener) { + if (ccsCheckCompatibility) { + checkCCSVersionCompatibility(request); + } final ClusterState clusterState = clusterService.state(); final Map remoteClusterIndices = remoteClusterService.groupIndices( request.indicesOptions(), diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index 7429ec5e8b50a..71e4e19c4de1f 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.SearchService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; @@ -45,6 +46,8 @@ import java.util.function.Predicate; import java.util.stream.Collectors; +import static org.elasticsearch.action.search.TransportSearchHelper.checkCCSVersionCompatibility; + public class TransportFieldCapabilitiesAction extends HandledTransportAction { public static final String ACTION_NODE_NAME = FieldCapabilitiesAction.NAME + "[n]"; @@ -55,6 +58,7 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction metadataFieldPred; + private final boolean ccsCheckCompatibility; @Inject public TransportFieldCapabilitiesAction( @@ -79,10 +83,14 @@ public TransportFieldCapabilitiesAction( FieldCapabilitiesNodeRequest::new, new NodeTransportHandler() ); + this.ccsCheckCompatibility = SearchService.CCS_VERSION_CHECK_SETTING.get(clusterService.getSettings()); } @Override protected void doExecute(Task task, 
FieldCapabilitiesRequest request, final ActionListener listener) { + if (ccsCheckCompatibility) { + checkCCSVersionCompatibility(request); + } // retrieve the initial timestamp in case the action is a cross cluster search long nowInMillis = request.nowInMillis() == null ? System.currentTimeMillis() : request.nowInMillis(); final ClusterState clusterState = clusterService.state(); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 549506bc56d8b..11072b273ab91 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -95,6 +95,7 @@ import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH; import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH; +import static org.elasticsearch.action.search.TransportSearchHelper.checkCCSVersionCompatibility; import static org.elasticsearch.search.sort.FieldSortBuilder.hasPrimaryFieldSort; import static org.elasticsearch.threadpool.ThreadPool.Names.SYSTEM_CRITICAL_READ; import static org.elasticsearch.threadpool.ThreadPool.Names.SYSTEM_READ; @@ -132,6 +133,7 @@ public class TransportSearchAction extends HandledTransportAction buildPerIndexOriginalIndices( @@ -371,6 +374,9 @@ private void executeRequest( ActionListener rewriteListener = ActionListener.wrap(rewritten -> { final SearchContextId searchContext; final Map remoteClusterIndices; + if (ccsCheckCompatibility) { + checkCCSVersionCompatibility(rewritten); + } if (rewritten.pointInTimeBuilder() != null) { searchContext = rewritten.pointInTimeBuilder().getSearchContextId(namedWriteableRegistry); remoteClusterIndices = getIndicesFromSearchContexts(searchContext, rewritten.indicesOptions()); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index cd40ddef723d5..a58585ffb8305 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -8,10 +8,14 @@ package org.elasticsearch.action.search; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.ByteArrayStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.VersionCheckingStreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalScrollSearchRequest; import org.elasticsearch.search.internal.ShardSearchContextId; @@ -20,8 +24,9 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.Base64; +import java.util.List; -final class TransportSearchHelper { +public final class TransportSearchHelper { private static final String INCLUDE_CONTEXT_UUID = "include_context_uuid"; @@ -91,6 +96,50 @@ static ParsedScrollId parseScrollId(String scrollId) { } } + private static final List ALL_VERSIONS = Version.getDeclaredVersions(Version.class); + private static final Version CCS_CHECK_VERSION = getPreviousMinorSeries(Version.CURRENT); + + /** + * Using the 
'search.check_ccs_compatibility' setting, clients can ask for an early + * check that inspects the incoming request and tries to verify that it can be handled by + * an earlier CCS-compliant version, e.g. currently an N-1 version where N is the current minor. + * + * Checking the compatibility involves serializing the request to a stream output that acts like + * it was on the previous minor version. This should, for example, trigger errors for {@link Writeable} parts of + * the request that were not available in those versions. + */ + public static void checkCCSVersionCompatibility(Writeable writeableRequest) { + try { + writeableRequest.writeTo(new VersionCheckingStreamOutput(CCS_CHECK_VERSION)); + } catch (Exception e) { + // if we cannot serialize, raise this as an error to indicate to the caller that CCS has problems with this request + throw new IllegalArgumentException( + "[" + + writeableRequest.getClass() + + "] is not compatible with version " + + CCS_CHECK_VERSION + + " and the '" + + SearchService.CCS_VERSION_CHECK_SETTING.getKey() + + "' setting is enabled.", + e + ); + } + } + + /** + * Returns the first minor version previous to the minor version passed in. + * E.g. 8.2.1 will return 8.1.0 + */ + static Version getPreviousMinorSeries(Version current) { + for (int i = ALL_VERSIONS.size() - 1; i >= 0; i--) { + Version v = ALL_VERSIONS.get(i); + if (v.before(current) && (v.minor < current.minor || v.major < current.major)) { + return Version.fromId(v.major * 1000000 + v.minor * 10000 + 99); + } + } + throw new IllegalArgumentException("couldn't find any released versions of the minor before [" + current + "]"); + } + private TransportSearchHelper() { } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java new file mode 100644 index 0000000000000..bd9ace99eb276 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.io.stream; + +import org.elasticsearch.Version; +import org.elasticsearch.core.Nullable; + +import java.io.IOException; + +/** + * This {@link StreamOutput} writes nowhere. It can be used to check if serialization would + * be successful when writing to a specific version.
+ */ +public class VersionCheckingStreamOutput extends StreamOutput { + + public VersionCheckingStreamOutput(Version version) { + setVersion(version); + } + + @Override + public void writeByte(byte b) throws IOException { + // no-op + } + + @Override + public void writeBytes(byte[] b, int offset, int length) throws IOException { + // no-op + } + + @Override + public void flush() throws IOException { + // no-op + + } + + @Override + public void close() throws IOException { + // no-op + + } + + @Override + public void reset() throws IOException { + // no-op + } + + @Override + public void writeNamedWriteable(NamedWriteable namedWriteable) throws IOException { + if (namedWriteable instanceof VersionedNamedWriteable vnw) { + checkVersionCompatibility(vnw); + } + super.writeNamedWriteable(namedWriteable); + } + + @Override + public void writeOptionalNamedWriteable(@Nullable NamedWriteable namedWriteable) throws IOException { + if (namedWriteable != null && namedWriteable instanceof VersionedNamedWriteable vnw) { + checkVersionCompatibility(vnw); + } + super.writeOptionalNamedWriteable(namedWriteable); + } + + private void checkVersionCompatibility(VersionedNamedWriteable namedWriteable) { + if (namedWriteable.getMinimalSupportedVersion().after(getVersion())) { + throw new IllegalArgumentException( + "[" + + namedWriteable.getWriteableName() + + "] was released first in version " + + namedWriteable.getMinimalSupportedVersion() + + ", failed compatibility check trying to send it to node with version " + + getVersion() + ); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 618c128a76fd2..f46de609296dc 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -407,6 +407,7 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.KEEPALIVE_INTERVAL_SETTING, SearchService.MAX_KEEPALIVE_SETTING, SearchService.ALLOW_EXPENSIVE_QUERIES, + SearchService.CCS_VERSION_CHECK_SETTING, MultiBucketConsumerService.MAX_BUCKET_SETTING, SearchService.LOW_LEVEL_CANCELLATION_SETTING, SearchService.MAX_OPEN_SCROLL_CONTEXT, diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryBuilder.java index 1480a70a48c3f..fb2da07ca96d6 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryBuilder.java @@ -9,12 +9,13 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; -import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.xcontent.ToXContentObject; import java.io.IOException; -public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewriteable { +public interface QueryBuilder extends VersionedNamedWriteable, ToXContentObject, Rewriteable { /** * Converts this QueryBuilder to a lucene {@link Query}. 
@@ -66,4 +67,9 @@ public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewritea default QueryBuilder rewrite(QueryRewriteContext queryRewriteContext) throws IOException { return this; } + + @Override + default Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchExtBuilder.java b/server/src/main/java/org/elasticsearch/search/SearchExtBuilder.java index e49d4ee78ca2e..26a5f630461cd 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchExtBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/SearchExtBuilder.java @@ -8,9 +8,10 @@ package org.elasticsearch.search; -import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.plugins.SearchPlugin; @@ -32,9 +33,16 @@ * * @see SearchExtSpec */ -public abstract class SearchExtBuilder implements NamedWriteable, ToXContentFragment { +public abstract class SearchExtBuilder implements VersionedNamedWriteable, ToXContentFragment { + @Override public abstract int hashCode(); + @Override public abstract boolean equals(Object obj); + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 416e4e5a7a317..91877f8b730a1 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -170,6 +170,12 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.Dynamic ); + public static final Setting CCS_VERSION_CHECK_SETTING = Setting.boolSetting( + "search.check_ccs_compatibility", + false, + Property.NodeScope + ); + /** * Enables low-level, frequent search cancellation checks. Enabling low-level checks will make long running searches to react * to the cancellation request faster. 
It will produce more cancellation checks but benchmarking has shown these did not diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java index a89f51854320c..9c78ff3864f0e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java @@ -7,8 +7,9 @@ */ package org.elasticsearch.search.aggregations; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -29,7 +30,7 @@ */ public abstract class AggregationBuilder implements - NamedWriteable, + VersionedNamedWriteable, ToXContentFragment, BaseAggregationBuilder, Rewriteable { @@ -190,6 +191,11 @@ public String toString() { return Strings.toString(this); } + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + /** * Return true if any of the child aggregations is a time-series aggregation that requires an in-order execution */ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java index e6a5842409a6b..85b6ce2c3893d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java @@ -7,10 +7,11 @@ */ package org.elasticsearch.search.aggregations; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -31,7 +32,7 @@ */ public abstract class PipelineAggregationBuilder implements - NamedWriteable, + VersionedNamedWriteable, BaseAggregationBuilder, ToXContentFragment, Rewriteable { @@ -258,4 +259,9 @@ public String toString() { public PipelineAggregationBuilder rewrite(QueryRewriteContext context) throws IOException { return this; } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java index ced8b487dbe5d..96872ae342e94 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.rescore; +import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -91,6 +92,11 @@ public String getWriteableName() { return NAME; } + 
@Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + /** * @return the query used for this rescore query */ diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java index bfe7c6bfd3200..897c14409b5fd 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java @@ -10,9 +10,9 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xcontent.ParseField; @@ -28,7 +28,7 @@ */ public abstract class RescorerBuilder> implements - NamedWriteable, + VersionedNamedWriteable, ToXContentObject, Rewriteable> { public static final int DEFAULT_WINDOW_SIZE = 10; diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 2efae46d05627..46c88b8915782 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -709,6 +709,11 @@ public String getWriteableName() { return NAME; } + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + /** * Creates a new {@link FieldSortBuilder} from the query held by the {@link XContentParser} in * {@link org.elasticsearch.xcontent.XContent} format. 
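The hunks above and below all apply one pattern: every writeable that can travel inside a search request now implements VersionedNamedWriteable, and builders that have been wire-compatible from the beginning return Version.V_EMPTY so the CCS pre-flight check always lets them through. As a minimal sketch of the opposite case (the class name and the 8.0.0 version below are illustrative assumptions, not part of this change), a writeable introduced in a newer release reports that release, and VersionCheckingStreamOutput#writeNamedWriteable rejects it whenever the stream targets anything older:

    import java.io.IOException;

    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.VersionedNamedWriteable;

    // Sketch only, not part of this patch: a hypothetical writeable that first
    // shipped in 8.0.0 and therefore cannot be serialized to older remotes.
    public class RecentlyAddedWriteable implements VersionedNamedWriteable {

        @Override
        public String getWriteableName() {
            return "recently_added_writeable";
        }

        @Override
        public Version getMinimalSupportedVersion() {
            // assumed first release that understands this writeable on the wire
            return Version.V_8_0_0;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // no state to serialize in this sketch
        }
    }

Writing such an object through a VersionCheckingStreamOutput whose version is before 8.0.0 raises the IllegalArgumentException built in checkVersionCompatibility, which checkCCSVersionCompatibility then wraps with the 'search.check_ccs_compatibility' hint shown in the tests below.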
diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index aa1b0f0833454..0b3b6ac8b92d2 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -351,6 +351,11 @@ public String getWriteableName() { return NAME; } + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + @Override public boolean equals(Object object) { if (this == object) { diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index 1da72092625da..915788af6373b 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BigArrays; @@ -157,6 +158,11 @@ public String getWriteableName() { return NAME; } + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + @Override public ScoreSortBuilder rewrite(QueryRewriteContext ctx) throws IOException { return this; diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 9eed79459e5f2..e1c0b94291c8c 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -385,6 +385,11 @@ public String getWriteableName() { return NAME; } + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + public enum ScriptSortType implements Writeable { /** script sort for a string value **/ STRING, diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index 7b45f19943274..bc4ef6a2d108b 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -14,7 +14,7 @@ import org.apache.lucene.search.join.ToChildBlockJoinQuery; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.RestApiVersion; @@ -39,7 +39,11 @@ import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; import static org.elasticsearch.search.sort.NestedSortBuilder.FILTER_FIELD; -public abstract class SortBuilder> implements NamedWriteable, ToXContentObject, Rewriteable> { +public abstract class SortBuilder> + implements + VersionedNamedWriteable, + ToXContentObject, + Rewriteable> { protected SortOrder order = SortOrder.ASC; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java 
b/server/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java index 785e7d555d62f..de4279d79919b 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java @@ -11,9 +11,9 @@ import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; @@ -29,7 +29,7 @@ /** * Base class for the different suggestion implementations. */ -public abstract class SuggestionBuilder> implements NamedWriteable, ToXContentFragment { +public abstract class SuggestionBuilder> implements VersionedNamedWriteable, ToXContentFragment { protected final String field; protected String text; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java index e66c5c06c1030..d10b10c09d66a 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.suggest.completion; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -325,6 +326,11 @@ public String getWriteableName() { return SUGGESTION_NAME; } + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + @Override protected boolean doEquals(CompletionSuggestionBuilder other) { return skipDuplicates == other.skipDuplicates diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index 65e17cc88890d..f7a415c3cbb07 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -704,6 +705,11 @@ public String getWriteableName() { return SUGGESTION_NAME; } + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + @Override protected boolean doEquals(PhraseSuggestionBuilder other) { return Objects.equals(maxErrors, other.maxErrors) diff --git a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java index 9764d256f0b79..bf9f26864bfbe 100644 --- 
a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java @@ -15,6 +15,7 @@ import org.apache.lucene.search.spell.NGramDistance; import org.apache.lucene.search.spell.StringDistance; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -456,6 +457,11 @@ public String getWriteableName() { return SUGGESTION_NAME; } + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + @Override protected boolean doEquals(TermSuggestionBuilder other) { return Objects.equals(suggestMode, other.suggestMode) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java new file mode 100644 index 0000000000000..23facdf2fa0f4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.resolve; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportResolveIndexActionTests extends ESTestCase { + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + public void testCCSCompatibilityCheck() throws Exception { + Settings settings = Settings.builder() + .put("node.name", TransportResolveIndexActionTests.class.getSimpleName()) + .put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true") + .build(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + try { + TransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); + + ResolveIndexAction.Request request = new ResolveIndexAction.Request(new String[] { "test" }) { + @Override + 
public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (out.getVersion().before(Version.CURRENT)) { + throw new IllegalArgumentException("This request isn't serializable to nodes before " + Version.CURRENT); + } + } + }; + + ClusterService clusterService = new ClusterService( + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + ResolveIndexAction.TransportAction action = new ResolveIndexAction.TransportAction( + transportService, + clusterService, + threadPool, + actionFilters, + null + ); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.doExecute(null, request, new ActionListener() { + @Override + public void onResponse(ResolveIndexAction.Response response) {} + + @Override + public void onFailure(Exception e) {} + }) + ); + + assertThat(ex.getMessage(), containsString("not compatible with version")); + assertThat(ex.getMessage(), containsString("and the 'search.check_ccs_compatibility' setting is enabled.")); + assertEquals("This request isn't serializable to nodes before " + Version.CURRENT, ex.getCause().getMessage()); + } finally { + assertTrue(ESTestCase.terminate(threadPool)); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java new file mode 100644 index 0000000000000..77e1c990ff690 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.fieldcaps; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.DummyQueryBuilder; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportFieldCapabilitiesActionTests extends ESTestCase { + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + public void testCCSCompatibilityCheck() throws Exception { + Settings settings = Settings.builder() + .put("node.name", TransportFieldCapabilitiesActionTests.class.getSimpleName()) + .put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true") + .build(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + try { + TransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); + + FieldCapabilitiesRequest fieldCapsRequest = new FieldCapabilitiesRequest(); + fieldCapsRequest.indexFilter(new DummyQueryBuilder() { + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + if (out.getVersion().before(Version.CURRENT)) { + throw new IllegalArgumentException("This query isn't serializable to nodes before " + Version.CURRENT); + } + } + }); + + IndicesService indicesService = mock(IndicesService.class); + when(indicesService.getAllMetadataFields()).thenReturn(Collections.singleton("_index")); + ClusterService clusterService = new ClusterService( + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + TransportFieldCapabilitiesAction action = new TransportFieldCapabilitiesAction( + transportService, + clusterService, + threadPool, + actionFilters, + indicesService, + null + ); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.doExecute(null, fieldCapsRequest, new ActionListener() { + @Override + public void onResponse(FieldCapabilitiesResponse response) {} + + @Override + public void onFailure(Exception e) {} + }) + ); + + assertThat( + ex.getMessage(), + containsString("[class org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest] is not compatible with version") + ); + assertThat(ex.getMessage(), containsString("and the 'search.check_ccs_compatibility' setting is enabled.")); + assertEquals("This query isn't serializable to nodes before " + Version.CURRENT, ex.getCause().getMessage()); + } finally { + 
assertTrue(ESTestCase.terminate(threadPool)); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 68f9162d615c6..db749b02ad8c0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -17,8 +17,11 @@ import org.elasticsearch.action.OriginalIndicesTests; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -30,8 +33,11 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.TimeValue; @@ -41,12 +47,16 @@ import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.DummyQueryBuilder; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -74,6 +84,7 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -102,6 +113,9 @@ import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasSize; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class TransportSearchActionTests extends ESTestCase { @@ -1356,4 +1370,71 @@ public void testLocalShardIteratorFromPointInTime() { assertTrue(anotherShardIterator.isPresent()); assertThat(anotherShardIterator.get().getTargetNodeIds(), hasSize(1)); } + + public void testCCSCompatibilityCheck() throws Exception { + Settings settings = Settings.builder() + .put("node.name", 
TransportSearchAction.class.getSimpleName()) + .put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true") + .build(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + ThreadPool threadPool = new ThreadPool(settings); + try { + TransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); + + SearchRequest searchRequest = new SearchRequest(); + searchRequest.source(new SearchSourceBuilder().query(new DummyQueryBuilder() { + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + throw new IllegalArgumentException("This query isn't serializable to nodes before " + Version.CURRENT); + } + })); + NodeClient client = new NodeClient(settings, threadPool); + + SearchService searchService = mock(SearchService.class); + when(searchService.getRewriteContext(any())).thenReturn(new QueryRewriteContext(null, null, null, null)); + ClusterService clusterService = new ClusterService( + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + TransportSearchAction action = new TransportSearchAction( + threadPool, + new NoneCircuitBreakerService(), + transportService, + searchService, + new SearchTransportService(transportService, client, null), + null, + clusterService, + actionFilters, + null, + null, + null + ); + + CountDownLatch latch = new CountDownLatch(1); + action.doExecute(null, searchRequest, new ActionListener<>() { + + @Override + public void onResponse(SearchResponse response) { + latch.countDown(); + fail("should not be called"); + } + + @Override + public void onFailure(Exception ex) { + assertThat( + ex.getMessage(), + containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version") + ); + assertThat(ex.getMessage(), containsString("and the 'search.check_ccs_compatibility' setting is enabled.")); + assertEquals("This query isn't serializable to nodes before " + Version.CURRENT, ex.getCause().getMessage()); + latch.countDown(); + } + }); + latch.await(); + } finally { + assertTrue(ESTestCase.terminate(threadPool)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchHelperTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchHelperTests.java index cc05269cae66e..ed3751eaecf6e 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchHelperTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchHelperTests.java @@ -16,6 +16,8 @@ import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.test.ESTestCase; +import java.util.List; + import static org.hamcrest.Matchers.equalTo; public class TransportSearchHelperTests extends ESTestCase { @@ -66,4 +68,22 @@ public void testParseScrollId() { assertEquals(42, parseScrollId.getContext()[2].getSearchContextId().getId()); assertThat(parseScrollId.getContext()[2].getSearchContextId().getSessionId(), equalTo("c")); } + + public void testGetPreviousMinorSeries() throws Exception { + final List declaredVersions = Version.getDeclaredVersions(Version.class); + Version randomVersion = randomValueOtherThanMany(v -> v.before(Version.V_7_1_0), () -> randomFrom(declaredVersions)); + Version previousFirstMinor = TransportSearchHelper.getPreviousMinorSeries(randomVersion); + assertTrue(previousFirstMinor.before(randomVersion)); + assertTrue(previousFirstMinor.revision == 0); + 
for (int i = declaredVersions.indexOf(previousFirstMinor); i < declaredVersions.indexOf(randomVersion); i++) { + Version version = declaredVersions.get(i); + assertTrue(version.before(randomVersion)); + if (randomVersion.major == previousFirstMinor.major) { + assertTrue(previousFirstMinor.minor == randomVersion.minor - 1); + } else { + assertTrue((randomVersion.major - 1) == previousFirstMinor.major); + assertTrue(randomVersion.minor == 0); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutputTests.java new file mode 100644 index 0000000000000..43766005be5f4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutputTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.io.stream; + +import org.elasticsearch.Version; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; + +public class VersionCheckingStreamOutputTests extends ESTestCase { + + private static class DummyNamedWriteable implements VersionedNamedWriteable { + + @Override + public String getWriteableName() { + return "test_writable"; + } + + @Override + public void writeTo(StreamOutput out) throws IOException {} + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + } + + public void testCheckVersionCompatibility() throws IOException { + Version streamVersion = VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT); + try (VersionCheckingStreamOutput out = new VersionCheckingStreamOutput(streamVersion)) { + out.writeNamedWriteable(QueryBuilders.matchAllQuery()); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> out.writeNamedWriteable(new DummyNamedWriteable()) + ); + assertEquals( + "[test_writable] was released first in version " + + Version.CURRENT + + ", failed compatibility check trying to send it to node with version " + + streamVersion, + e.getMessage() + ); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java index dbbe8e9f87b25..47e79f1e06be4 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopTermsRewrite; import org.apache.lucene.store.Directory; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamOutput; @@ -163,6 +164,11 @@ public void writeTo(StreamOutput out) throws IOException { public String fieldName() { return "foo"; } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } } /** diff --git 
a/server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java index 429871c2049c4..55ba0eb0decba 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java @@ -21,7 +21,6 @@ import java.nio.charset.StandardCharsets; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -62,9 +61,4 @@ public void testTypeInBody() { assertCriticalWarnings(RestMultiSearchAction.TYPES_DEPRECATION_MESSAGE); } - private Map> headersWith(String accept, List value) { - Map> headers = new HashMap<>(); - headers.put(accept, value); - return headers; - } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 7de59898a7c79..dd612332ce803 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.util.CharsRefBuilder; +import org.elasticsearch.Version; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -625,6 +626,11 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep public RescoreContext innerBuildContext(int windowSize, SearchExecutionContext context) throws IOException { return null; } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } } private static class TestSuggester extends Suggester { @@ -688,6 +694,11 @@ protected int doHashCode() { public String getWriteableName() { return "test"; } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } } @SuppressWarnings("rawtypes") diff --git a/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/DummyQueryBuilder.java similarity index 93% rename from server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryBuilder.java rename to test/framework/src/main/java/org/elasticsearch/search/DummyQueryBuilder.java index ac9e3a40fad9f..9797c2865ab37 100644 --- a/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryBuilder.java +++ b/test/framework/src/main/java/org/elasticsearch/search/DummyQueryBuilder.java @@ -6,14 +6,14 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.index.query.plugin; +package org.elasticsearch.search; import org.apache.lucene.search.Query; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.index.query.plugin.DummyQueryParserPlugin.DummyQuery; +import org.elasticsearch.search.DummyQueryParserPlugin.DummyQuery; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; diff --git a/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java b/test/framework/src/main/java/org/elasticsearch/search/DummyQueryParserPlugin.java similarity index 80% rename from server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java rename to test/framework/src/main/java/org/elasticsearch/search/DummyQueryParserPlugin.java index 7d017ff996420..1a4b7b9f0bdbc 100644 --- a/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/search/DummyQueryParserPlugin.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.index.query.plugin; +package org.elasticsearch.search; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -20,13 +20,18 @@ import java.io.IOException; import java.util.List; -import static java.util.Collections.singletonList; - public class DummyQueryParserPlugin extends Plugin implements SearchPlugin { @Override public List> getQueries() { - return singletonList(new QuerySpec<>(DummyQueryBuilder.NAME, DummyQueryBuilder::new, DummyQueryBuilder::fromXContent)); + return List.of( + new QuerySpec<>(DummyQueryBuilder.NAME, DummyQueryBuilder::new, DummyQueryBuilder::fromXContent), + new QuerySpec<>( + FailBeforeCurrentVersionQueryBuilder.NAME, + FailBeforeCurrentVersionQueryBuilder::new, + FailBeforeCurrentVersionQueryBuilder::fromXContent + ) + ); } public static class DummyQuery extends Query { diff --git a/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java new file mode 100644 index 0000000000000..1106774e15c0e --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Query simulating serialization error on versions earlier than CURRENT + */ +public class FailBeforeCurrentVersionQueryBuilder extends DummyQueryBuilder { + + public static final String NAME = "fail_before_current_version"; + + public FailBeforeCurrentVersionQueryBuilder(StreamInput in) throws IOException { + super(in); + } + + public FailBeforeCurrentVersionQueryBuilder() {} + + @Override + protected void doWriteTo(StreamOutput out) { + if (out.getVersion().before(Version.CURRENT)) { + throw new IllegalArgumentException("This query isn't serializable to nodes before " + Version.CURRENT); + } + } + + public static DummyQueryBuilder fromXContent(XContentParser parser) throws IOException { + DummyQueryBuilder.fromXContent(parser); + return new FailBeforeCurrentVersionQueryBuilder(); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + return this; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 52a3263c4baec..082e4e3a0d7b4 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -1320,6 +1320,11 @@ protected void doWriteTo(StreamOutput out) throws IOException { throw new UnsupportedOperationException(); } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } } public static class InternalAggCardinalityUpperBound extends InternalAggregation { diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java index 812937a642f55..c5b7b96e50628 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java @@ -9,12 +9,15 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.DummyQueryBuilder; +import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; @@ -89,6 +92,14 @@ public void setupSuiteScopeCluster() throws InterruptedException { indexRandom(true, 
true, reqs);
     }
 
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal, otherSettings))
+            .put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true")
+            .build();
+    }
+
     public void testMaxMinAggregation() throws Exception {
         int step = numShards > 2 ? randomIntBetween(2, numShards) : 2;
         int numFailures = randomBoolean() ? randomIntBetween(0, numShards) : 0;
@@ -507,4 +518,19 @@ public void testMaxResponseSize() {
         updateSettingsRequest.persistentSettings(Settings.builder().put("search.max_async_search_response_size", (String) null));
         assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
     }
+
+    public void testCCSCheckCompatibility() throws Exception {
+        SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(new SearchSourceBuilder().query(new DummyQueryBuilder() {
+            @Override
+            public Version getMinimalSupportedVersion() {
+                return Version.CURRENT;
+            }
+        }), indexName);
+
+        AsyncSearchResponse response = submitAsyncSearch(request);
+        assertFalse(response.isRunning());
+        Exception failure = response.getFailure();
+        assertThat(failure.getMessage(), containsString("error while executing search"));
+        assertThat(failure.getCause().getMessage(), containsString("the 'search.check_ccs_compatibility' setting is enabled"));
+    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java
index f86c3344eb54e..87fcfefb5cd26 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java
@@ -82,6 +82,7 @@
 import java.util.concurrent.atomic.AtomicReferenceArray;
 import java.util.stream.Collectors;
 
+import static org.elasticsearch.action.search.TransportSearchHelper.checkCCSVersionCompatibility;
 import static org.elasticsearch.xpack.core.security.SecurityField.DOCUMENT_LEVEL_SECURITY_FEATURE;
 
 public class TransportTermsEnumAction extends HandledTransportAction<TermsEnumRequest, TermsEnumResponse> {
 
@@ -98,6 +99,7 @@ public class TransportTermsEnumAction extends HandledTransportAction<TermsEnumRequest, TermsEnumResponse> {
+    private final boolean ccsCheckCompatibility;
 
     protected void doExecute(Task task, TermsEnumRequest request, ActionListener<TermsEnumResponse> listener) {
+        if (ccsCheckCompatibility) {
+            checkCCSVersionCompatibility(request);
+        }
         new AsyncBroadcastAction(task, request, listener).start();
     }
 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TransportTermsEnumActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TransportTermsEnumActionTests.java
index 27b87b66316d1..20e88cb69d1c2 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TransportTermsEnumActionTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TransportTermsEnumActionTests.java
@@ -6,18 +6,39 @@
  */
 package org.elasticsearch.xpack.core.termsenum;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.search.DummyQueryBuilder;
+import org.elasticsearch.search.SearchService;
 import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
 import org.elasticsearch.xpack.core.termsenum.action.TermsEnumAction;
 import org.elasticsearch.xpack.core.termsenum.action.TermsEnumRequest;
 import org.elasticsearch.xpack.core.termsenum.action.TermsEnumResponse;
 
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 
 public class TransportTermsEnumActionTests extends ESSingleNodeTestCase {
 
+    @Override
+    protected Settings nodeSettings() {
+        return Settings.builder().put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true").build();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return Arrays.asList(LocalStateCompositeXPackPlugin.class);
+    }
+
     /*
      * Copy of test that tripped up similarly broadcast ValidateQuery
      */
@@ -42,4 +63,25 @@ public void onFailure(final Exception e) {
 
         assertThat(invoked.get(), equalTo(true)); // ensure that onFailure was invoked
     }
+
+    /**
+     * Test that the CCS compatibility check is triggered by a query that cannot be serialized to any version before Version.CURRENT
+     */
+    public void testCCSCheckCompatibility() throws Exception {
+        TermsEnumRequest request = new TermsEnumRequest().field("field").timeout(TimeValue.timeValueSeconds(5));
+        request.indexFilter(new DummyQueryBuilder() {
+            @Override
+            public Version getMinimalSupportedVersion() {
+                return Version.CURRENT;
+            }
+        });
+        ExecutionException ex = expectThrows(ExecutionException.class, () -> client().execute(TermsEnumAction.INSTANCE, request).get());
+        assertThat(ex.getCause().getMessage(), containsString("not compatible with version"));
+        assertThat(ex.getCause().getMessage(), containsString("the 'search.check_ccs_compatibility' setting is enabled."));
+        assertThat(
+            ex.getCause().getCause().getMessage(),
+            containsString(
+                "was released first in version " + Version.CURRENT + ", failed compatibility check trying to send it to node with version"
+            )
+        );
+    }
 }

From 5b6164f8fd089072e00dd950bd5c1257915873c1 Mon Sep 17 00:00:00 2001
From: Nicole Albee <2642763+a03nikki@users.noreply.github.com>
Date: Thu, 27 Jan 2022 08:23:12 -0600
Subject: [PATCH 072/100] [Docs] Watcher clarification on CSV formulas
 warning. (#83088)

* Update docs for Watcher notification settings to include the default
  xpack.notification.reporting.warning.kbn-csv-contains-formulas.text value
  and link to the Kibana documentation.

* Apply suggestions from code review

Co-authored-by: Adam Locke

* Apply suggestions from code review

Co-authored-by: Adam Locke

Co-authored-by: Adam Locke
---
 .../settings/notification-settings.asciidoc  | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc
index 874b41eb6b096..44447deeba75d 100644
--- a/docs/reference/settings/notification-settings.asciidoc
+++ b/docs/reference/settings/notification-settings.asciidoc
@@ -264,6 +264,13 @@ HTML feature groups>>.
 Set to `false` to completely disable HTML sanitation. Not recommended.
 Defaults to `true`.
 
+`xpack.notification.reporting.warning.kbn-csv-contains-formulas.text`::
+(<>)
+Specifies a custom message, which is sent if the formula verification criteria
+for CSV files from {kib}'s {kibana-ref}/reporting-settings-kb.html#reporting-csv-settings[`xpack.reporting.csv.checkForFormulas`] is `true`.
+Use `%s` in the message as a placeholder for the filename.
+Defaults to `Warning: The attachment [%s] contains characters which spreadsheet applications may interpret as formulas. Please ensure that the attachment is safe prior to opening.` + [[ssl-notification-smtp-settings]] :ssl-prefix: xpack.notification.email :component: {watcher} Email @@ -273,12 +280,6 @@ Defaults to `true`. include::ssl-settings.asciidoc[] -`xpack.notification.reporting.warning.kbn-csv-contains-formulas.text`:: -(<>) -Specifies a custom message to be sent if the formula verification criteria -for CSV files, from kibana `xpack.reporting.csv.checkForFormulas`, is true. -Use %s in the message as a placeholder for the filename. - [[slack-notification-settings]] ==== Slack Notification Settings You can configure the following Slack notification settings in From e0333f5d024ee9985cba5e726b9e92e00e967878 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 27 Jan 2022 15:35:40 +0100 Subject: [PATCH 073/100] Make ES version ids more readable (#83202) Use underscores to separate groups of digits to denote the major, minor, and patch versions. --- .../main/java/org/elasticsearch/Version.java | 106 +++++++++--------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 50d7bed85b9a2..437fee493307b 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -48,59 +48,59 @@ public class Version implements Comparable, ToXContentFragment { public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - public static final Version V_7_0_0 = new Version(7000099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_0_1 = new Version(7000199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_1_0 = new Version(7010099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_1_1 = new Version(7010199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_2_0 = new Version(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_2_1 = new Version(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_3_0 = new Version(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final Version V_7_3_1 = new Version(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final Version V_7_3_2 = new Version(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final Version V_7_4_1 = new Version(7040199, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final Version V_7_4_2 = new Version(7040299, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final Version V_7_5_0 = new Version(7050099, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final Version V_7_5_1 = new Version(7050199, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final Version V_7_5_2 = new Version(7050299, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final Version V_7_6_0 = new Version(7060099, org.apache.lucene.util.Version.LUCENE_8_4_0); - public static final Version V_7_6_1 = new Version(7060199, org.apache.lucene.util.Version.LUCENE_8_4_0); - public static final Version V_7_6_2 
= new Version(7060299, org.apache.lucene.util.Version.LUCENE_8_4_0); - public static final Version V_7_7_0 = new Version(7070099, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final Version V_7_7_1 = new Version(7070199, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final Version V_7_8_0 = new Version(7080099, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final Version V_7_8_1 = new Version(7080199, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final Version V_7_9_0 = new Version(7090099, org.apache.lucene.util.Version.LUCENE_8_6_0); - public static final Version V_7_9_1 = new Version(7090199, org.apache.lucene.util.Version.LUCENE_8_6_2); - public static final Version V_7_9_2 = new Version(7090299, org.apache.lucene.util.Version.LUCENE_8_6_2); - public static final Version V_7_9_3 = new Version(7090399, org.apache.lucene.util.Version.LUCENE_8_6_2); - public static final Version V_7_10_0 = new Version(7100099, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_10_1 = new Version(7100199, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_10_2 = new Version(7100299, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_11_0 = new Version(7110099, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_11_1 = new Version(7110199, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_11_2 = new Version(7110299, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_12_0 = new Version(7120099, org.apache.lucene.util.Version.LUCENE_8_8_0); - public static final Version V_7_12_1 = new Version(7120199, org.apache.lucene.util.Version.LUCENE_8_8_0); - public static final Version V_7_13_0 = new Version(7130099, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_13_1 = new Version(7130199, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_13_2 = new Version(7130299, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_13_3 = new Version(7130399, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_13_4 = new Version(7130499, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_14_0 = new Version(7140099, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_14_1 = new Version(7140199, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_14_2 = new Version(7140299, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_15_0 = new Version(7150099, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_15_1 = new Version(7150199, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_15_2 = new Version(7150299, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_16_0 = new Version(7160099, org.apache.lucene.util.Version.LUCENE_8_10_1); - public static final Version V_7_16_1 = new Version(7160199, org.apache.lucene.util.Version.LUCENE_8_10_1); - public static final Version V_7_16_2 = new Version(7160299, org.apache.lucene.util.Version.LUCENE_8_10_1); - public static final Version V_7_16_3 = new Version(7160399, org.apache.lucene.util.Version.LUCENE_8_10_1); - public static final Version V_7_16_4 = new Version(7160499, org.apache.lucene.util.Version.LUCENE_8_10_1); - public static final Version 
V_7_17_0 = new Version(7170099, LUCENE_8_11_1); - public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_9_0_0); - public static final Version V_8_1_0 = new Version(8010099, org.apache.lucene.util.Version.LUCENE_9_0_0); + public static final Version V_7_0_0 = new Version(7_00_00_99, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_0_1 = new Version(7_00_01_99, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_1_0 = new Version(7_01_00_99, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_1_1 = new Version(7_01_01_99, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_2_0 = new Version(7_02_00_99, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_2_1 = new Version(7_02_01_99, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_3_0 = new Version(7_03_00_99, org.apache.lucene.util.Version.LUCENE_8_1_0); + public static final Version V_7_3_1 = new Version(7_03_01_99, org.apache.lucene.util.Version.LUCENE_8_1_0); + public static final Version V_7_3_2 = new Version(7_03_02_99, org.apache.lucene.util.Version.LUCENE_8_1_0); + public static final Version V_7_4_0 = new Version(7_04_00_99, org.apache.lucene.util.Version.LUCENE_8_2_0); + public static final Version V_7_4_1 = new Version(7_04_01_99, org.apache.lucene.util.Version.LUCENE_8_2_0); + public static final Version V_7_4_2 = new Version(7_04_02_99, org.apache.lucene.util.Version.LUCENE_8_2_0); + public static final Version V_7_5_0 = new Version(7_05_00_99, org.apache.lucene.util.Version.LUCENE_8_3_0); + public static final Version V_7_5_1 = new Version(7_05_01_99, org.apache.lucene.util.Version.LUCENE_8_3_0); + public static final Version V_7_5_2 = new Version(7_05_02_99, org.apache.lucene.util.Version.LUCENE_8_3_0); + public static final Version V_7_6_0 = new Version(7_06_00_99, org.apache.lucene.util.Version.LUCENE_8_4_0); + public static final Version V_7_6_1 = new Version(7_06_01_99, org.apache.lucene.util.Version.LUCENE_8_4_0); + public static final Version V_7_6_2 = new Version(7_06_02_99, org.apache.lucene.util.Version.LUCENE_8_4_0); + public static final Version V_7_7_0 = new Version(7_07_00_99, org.apache.lucene.util.Version.LUCENE_8_5_1); + public static final Version V_7_7_1 = new Version(7_07_01_99, org.apache.lucene.util.Version.LUCENE_8_5_1); + public static final Version V_7_8_0 = new Version(7_08_00_99, org.apache.lucene.util.Version.LUCENE_8_5_1); + public static final Version V_7_8_1 = new Version(7_08_01_99, org.apache.lucene.util.Version.LUCENE_8_5_1); + public static final Version V_7_9_0 = new Version(7_09_00_99, org.apache.lucene.util.Version.LUCENE_8_6_0); + public static final Version V_7_9_1 = new Version(7_09_01_99, org.apache.lucene.util.Version.LUCENE_8_6_2); + public static final Version V_7_9_2 = new Version(7_09_02_99, org.apache.lucene.util.Version.LUCENE_8_6_2); + public static final Version V_7_9_3 = new Version(7_09_03_99, org.apache.lucene.util.Version.LUCENE_8_6_2); + public static final Version V_7_10_0 = new Version(7_10_00_99, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_10_1 = new Version(7_10_01_99, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_10_2 = new Version(7_10_02_99, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_11_0 = new Version(7_11_00_99, 
org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_11_1 = new Version(7_11_01_99, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_11_2 = new Version(7_11_02_99, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_12_0 = new Version(7_12_00_99, org.apache.lucene.util.Version.LUCENE_8_8_0); + public static final Version V_7_12_1 = new Version(7_12_01_99, org.apache.lucene.util.Version.LUCENE_8_8_0); + public static final Version V_7_13_0 = new Version(7_13_00_99, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_13_1 = new Version(7_13_01_99, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_13_2 = new Version(7_13_02_99, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_13_3 = new Version(7_13_03_99, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_13_4 = new Version(7_13_04_99, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_14_0 = new Version(7_14_00_99, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_14_1 = new Version(7_14_01_99, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_14_2 = new Version(7_14_02_99, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_15_0 = new Version(7_15_00_99, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_15_1 = new Version(7_15_01_99, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_15_2 = new Version(7_15_02_99, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_16_0 = new Version(7_16_00_99, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_1 = new Version(7_16_01_99, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_2 = new Version(7_16_02_99, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_3 = new Version(7_16_03_99, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_4 = new Version(7_16_04_99, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_17_0 = new Version(7_17_00_99, LUCENE_8_11_1); + public static final Version V_8_0_0 = new Version(8_00_00_99, org.apache.lucene.util.Version.LUCENE_9_0_0); + public static final Version V_8_1_0 = new Version(8_01_00_99, org.apache.lucene.util.Version.LUCENE_9_0_0); public static final Version CURRENT = V_8_1_0; private static final ImmutableOpenIntMap idToVersion; From 1504c93e94f8100ca50a18581bfbe1f8de8db8ff Mon Sep 17 00:00:00 2001 From: Justin Cranford <89857999+justincr-elastic@users.noreply.github.com> Date: Thu, 27 Jan 2022 10:09:37 -0500 Subject: [PATCH 074/100] Test example.com, googleapis.com, and content-autofill.googleapis.com (#82898) More eTLD tests for registered domain processor. 
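Before the diff, a note for reviewers unfamiliar with the processor: once the eTLD for an input FQDN has been resolved against the public suffix list, the remaining fields follow mechanically. The sketch below is illustrative only (the `splitFqdn` helper is hypothetical, not the processor's actual code) and assumes the eTLD lookup has already happened:

```java
import java.util.Arrays;

// Illustrative only: derive the registered domain and subdomain from an FQDN,
// assuming the eTLD was already resolved via the public suffix list.
final class EtldSplitExample {

    /** Returns {domain, registeredDomain, eTLD, subdomain}, with null where a field is absent. */
    static String[] splitFqdn(String fqdn, String eTld) {
        if (fqdn.equals(eTld)) {
            // e.g. "com" or "googleapis.com": an eTLD alone has no registered domain or subdomain
            return new String[] { fqdn, null, eTld, null };
        }
        String rest = fqdn.substring(0, fqdn.length() - eTld.length() - 1); // strip ".eTLD"
        int lastDot = rest.lastIndexOf('.');
        String registered = (lastDot < 0 ? rest : rest.substring(lastDot + 1)) + "." + eTld;
        String subdomain = lastDot < 0 ? null : rest.substring(0, lastDot);
        return new String[] { fqdn, registered, eTld, subdomain };
    }

    public static void main(String[] args) {
        // [www.books.amazon.co.uk, amazon.co.uk, co.uk, www.books]
        System.out.println(Arrays.toString(splitFqdn("www.books.amazon.co.uk", "co.uk")));
        // [content-autofill.googleapis.com, content-autofill.googleapis.com, googleapis.com, null]
        System.out.println(Arrays.toString(splitFqdn("content-autofill.googleapis.com", "googleapis.com")));
    }
}
```

These are exactly the tuples the assertions below expect for those inputs.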
---
 .../RegisteredDomainProcessorTests.java       | 23 ++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorTests.java
index f63aff0518072..f9d35b902c9d8 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorTests.java
@@ -17,6 +17,13 @@
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 
+/**
+ * Test parsing of an eTLD from an FQDN. The list of eTLDs is maintained here:
+ * https://github.com/publicsuffix/list/blob/master/public_suffix_list.dat
+ *
+ * Effective TLDs (eTLDs) are not the same as DNS TLDs. Uses for eTLDs are listed here:
+ * https://publicsuffix.org/learn/
+ */
 public class RegisteredDomainProcessorTests extends ESTestCase {
     private Map<String, Object> buildEvent(String domain) {
         return new HashMap<>() {
@@ -33,6 +40,21 @@ public void testBasic() throws Exception {
         testRegisteredDomainProcessor(buildEvent("."), null, null, null, null);
         testRegisteredDomainProcessor(buildEvent("$"), null, null, null, null);
         testRegisteredDomainProcessor(buildEvent("foo.bar.baz"), null, null, null, null);
+        testRegisteredDomainProcessor(buildEvent("www.books.amazon.co.uk"), "www.books.amazon.co.uk", "amazon.co.uk", "co.uk", "www.books");
+        // Verify "com" is returned as the eTLD, for that FQDN or subdomain
+        testRegisteredDomainProcessor(buildEvent("com"), "com", null, "com", null);
+        testRegisteredDomainProcessor(buildEvent("example.com"), "example.com", "example.com", "com", null);
+        // Verify "googleapis.com" is returned as the eTLD, for that FQDN or subdomain
+        testRegisteredDomainProcessor(buildEvent("googleapis.com"), "googleapis.com", null, "googleapis.com", null);
+        testRegisteredDomainProcessor(
+            buildEvent("content-autofill.googleapis.com"),
+            "content-autofill.googleapis.com",
+            "content-autofill.googleapis.com",
+            "googleapis.com",
+            null
+        );
+        // Verify "global.ssl.fastly.net" is returned as the eTLD, for that FQDN or subdomain
+        testRegisteredDomainProcessor(buildEvent("global.ssl.fastly.net"), "global.ssl.fastly.net", null, "global.ssl.fastly.net", null);
         testRegisteredDomainProcessor(
             buildEvent("1.www.global.ssl.fastly.net"),
             "1.www.global.ssl.fastly.net",
@@ -40,7 +62,6 @@ public void testBasic() throws Exception {
             "global.ssl.fastly.net",
             "1"
         );
-        testRegisteredDomainProcessor(buildEvent("www.books.amazon.co.uk"), "www.books.amazon.co.uk", "amazon.co.uk", "co.uk", "www.books");
     }
 
     public void testUseRoot() throws Exception {

From c1fbf87de821004043ef355ee5849b404bfa9d9d Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Thu, 27 Jan 2022 16:18:20 +0000
Subject: [PATCH 075/100] [ML] Add error counts to trained model stats (#82705)

Adds inference_count, timeout_count, rejected_execution_count and
error_count fields to trained model stats.
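One point worth making explicit before the diff: the new per-node counters also roll up into deployment-level totals in the stats response, and zero-valued totals are omitted from the response body. A minimal sketch of that roll-up (the `NodeCounts` record here is a stand-in for illustration, not an Elasticsearch class):

```java
import java.util.List;

// Stand-in for the per-node stats; only the new counters are modeled here.
record NodeCounts(long inferenceCount, int errorCount, int rejectedExecutionCount, int timeoutCount) {}

final class DeploymentTotalsExample {
    public static void main(String[] args) {
        List<NodeCounts> nodes = List.of(new NodeCounts(5, 1, 0, 2), new NodeCounts(4, 1, 0, 3));
        long inferences = nodes.stream().mapToLong(NodeCounts::inferenceCount).sum();     // 9
        int errors = nodes.stream().mapToInt(NodeCounts::errorCount).sum();               // 2
        int rejected = nodes.stream().mapToInt(NodeCounts::rejectedExecutionCount).sum(); // 0
        // Zero-valued totals, such as rejected_execution_count here, are simply not emitted.
        if (errors > 0) System.out.println("error_count: " + errors);
        if (rejected > 0) System.out.println("rejected_execution_count: " + rejected);
        if (inferences > 0) System.out.println("inference_count: " + inferences);
    }
}
```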
--- docs/changelog/82705.yaml | 5 + .../apis/get-trained-models-stats.asciidoc | 78 +++++++++++++++- .../inference/allocation/AllocationStats.java | 92 ++++++++++++++++++- ...TrainedModelsStatsActionResponseTests.java | 3 + .../allocation/AllocationStatsTests.java | 3 + .../TransportGetDeploymentStatsAction.java | 6 +- .../deployment/DeploymentManager.java | 56 ++++++++--- .../ml/inference/deployment/ModelStats.java | 3 + .../process/PyTorchResultProcessor.java | 32 ++++--- .../process/ProcessWorkerExecutorService.java | 8 +- ...chineLearningInfoTransportActionTests.java | 7 ++ .../deployment/DeploymentManagerTests.java | 51 +++++++++- 12 files changed, 301 insertions(+), 43 deletions(-) create mode 100644 docs/changelog/82705.yaml diff --git a/docs/changelog/82705.yaml b/docs/changelog/82705.yaml new file mode 100644 index 0000000000000..b6d48fdbc1f0d --- /dev/null +++ b/docs/changelog/82705.yaml @@ -0,0 +1,5 @@ +pr: 82705 +summary: Add error counts to trained model stats +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc index 57c5f97d062b7..aaee29fff5578 100644 --- a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc +++ b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc @@ -112,10 +112,26 @@ The detailed allocation state related to the nodes. The desired number of nodes for model allocation. ====== +`error_count`::: +(integer) +The sum of `error_count` for all nodes in the deployment. + +`inference_count`::: +(integer) +The sum of `inference_count` for all nodes in the deployment. + +`inference_threads`::: +(integer) +The number of threads used by the inference process. + `model_id`::: (string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] +`model_threads`::: +(integer) +The number of threads used when sending inference requests to the model. + `nodes`::: (array of objects) The deployment stats for each node that currently has the model allocated. @@ -127,14 +143,30 @@ The deployment stats for each node that currently has the model allocated. (double) The average time for each inference call to complete on this node. +`error_count`::: +(integer) +The number of errors when evaluating the trained model. + `inference_count`::: (integer) The total number of inference calls made against this node for this model. +`inference_threads`::: +(integer) +The number of threads used by the inference process. +This value is limited by the number of hardware threads on the node; +it might therefore differ from the `inference_threads` value in the <> API. + `last_access`::: (long) The epoch time stamp of the last inference call for the model on this node. +`model_threads`::: +(integer) +The number of threads used when sending inference requests to the model. +This value is limited by the number of hardware threads on the node; +it might therefore differ from the `model_threads` value in the <> API. + `node`::: (object) Information pertaining to the node. @@ -162,14 +194,24 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=node-id] include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=node-transport-address] ======== -`reason`::: -(string) -The reason for the current state. Usually only populated when the `routing_state` is `failed`. +`number_of_pending_requests`::: +(integer) +The number of inference requests queued to be processed. 
`routing_state`::: (object) The current routing state and reason for the current routing state for this allocation. + +.Properties of routing_state +[%collapsible%open] +======== +`reason`::: +(string) +The reason for the current state. Usually only populated when the `routing_state` is `failed`. + +`routing_state`::: +(string) +The current routing state. -- * `starting`: The model is attempting to allocate on this model, inference calls are not yet accepted. * `started`: The model is allocated and ready to accept inference requests. @@ -177,13 +219,34 @@ The current routing state and reason for the current routing state for this allo * `stopped`: The model is fully deallocated from this node. * `failed`: The allocation attempt failed, see `reason` field for the potential cause. -- +======== + +`rejected_execution_count`::: +(integer) +The number of inference requests that were not processed because the +queue was full. `start_time`::: (long) The epoch timestamp when the allocation started. +`timeout_count`::: +(integer) +The number of inference requests that timed out before being processed. ====== +`rejected_execution_count`::: +(integer) +The sum of `rejected_execution_count` for all nodes in the deployment. +Individual nodes reject an inference request if the inference queue is full. +The queue size is controlled by the `queue_capacity` setting in the +<> API. + +`reason`::: +(string) +The reason for the current deployment state. +Usually only populated when the model is not deployed to a node. + `start_time`::: (long) The epoch timestamp when the deployment started. @@ -198,6 +261,15 @@ The overall state of the deployment. The values may be: * `stopping`: The deployment is preparing to stop and deallocate the model from the relevant nodes. -- +`timeout_count`::: +(integer) +The sum of `timeout_count` for all nodes in the deployment. + +`queue_capacity`::: +(integer) +The number of inference requests that may be queued before new requests are +rejected. 
+ ===== `inference_stats`::: diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/allocation/AllocationStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/allocation/AllocationStats.java index 2aa6d317800c7..0b6cd8045c2a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/allocation/AllocationStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/allocation/AllocationStats.java @@ -31,6 +31,9 @@ public static class NodeStats implements ToXContentObject, Writeable { private final Double avgInferenceTime; private final Instant lastAccess; private final Integer pendingCount; + private final int errorCount; + private final int rejectedExecutionCount; + private final int timeoutCount; private final RoutingStateAndReason routingState; private final Instant startTime; private final Integer inferenceThreads; @@ -41,6 +44,9 @@ public static AllocationStats.NodeStats forStartedState( long inferenceCount, Double avgInferenceTime, int pendingCount, + int errorCount, + int rejectedExecutionCount, + int timeoutCount, Instant lastAccess, Instant startTime, Integer inferenceThreads, @@ -52,6 +58,9 @@ public static AllocationStats.NodeStats forStartedState( avgInferenceTime, lastAccess, pendingCount, + errorCount, + rejectedExecutionCount, + timeoutCount, new RoutingStateAndReason(RoutingState.STARTED, null), Objects.requireNonNull(startTime), inferenceThreads, @@ -60,7 +69,20 @@ public static AllocationStats.NodeStats forStartedState( } public static AllocationStats.NodeStats forNotStartedState(DiscoveryNode node, RoutingState state, String reason) { - return new AllocationStats.NodeStats(node, null, null, null, null, new RoutingStateAndReason(state, reason), null, null, null); + return new AllocationStats.NodeStats( + node, + null, + null, + null, + null, + 0, + 0, + 0, + new RoutingStateAndReason(state, reason), + null, + null, + null + ); } public NodeStats( @@ -69,6 +91,9 @@ public NodeStats( Double avgInferenceTime, Instant lastAccess, Integer pendingCount, + int errorCount, + int rejectedExecutionCount, + int timeoutCount, RoutingStateAndReason routingState, @Nullable Instant startTime, @Nullable Integer inferenceThreads, @@ -79,6 +104,9 @@ public NodeStats( this.avgInferenceTime = avgInferenceTime; this.lastAccess = lastAccess; this.pendingCount = pendingCount; + this.errorCount = errorCount; + this.rejectedExecutionCount = rejectedExecutionCount; + this.timeoutCount = timeoutCount; this.routingState = routingState; this.startTime = startTime; this.inferenceThreads = inferenceThreads; @@ -96,13 +124,18 @@ public NodeStats(StreamInput in) throws IOException { this.pendingCount = in.readOptionalVInt(); this.routingState = in.readOptionalWriteable(RoutingStateAndReason::new); this.startTime = in.readOptionalInstant(); - if (in.getVersion().onOrAfter(Version.V_8_1_0)) { this.inferenceThreads = in.readOptionalVInt(); this.modelThreads = in.readOptionalVInt(); + this.errorCount = in.readVInt(); + this.rejectedExecutionCount = in.readVInt(); + this.timeoutCount = in.readVInt(); } else { this.inferenceThreads = null; this.modelThreads = null; + this.errorCount = 0; + this.rejectedExecutionCount = 0; + this.timeoutCount = 0; } } @@ -130,6 +163,18 @@ public Integer getPendingCount() { return pendingCount; } + public int getErrorCount() { + return errorCount; + } + + public int getRejectedExecutionCount() { + return rejectedExecutionCount; + } + + public int 
getTimeoutCount() { + return timeoutCount; + } + public Instant getStartTime() { return startTime; } @@ -146,7 +191,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (inferenceCount != null) { builder.field("inference_count", inferenceCount); } - if (avgInferenceTime != null) { + // avoid reporting the average time as 0 if count < 1 + if (avgInferenceTime != null && (inferenceCount != null && inferenceCount > 0)) { builder.field("average_inference_time_ms", avgInferenceTime); } if (lastAccess != null) { @@ -155,6 +201,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (pendingCount != null) { builder.field("number_of_pending_requests", pendingCount); } + if (errorCount > 0) { + builder.field("error_count", errorCount); + } + if (rejectedExecutionCount > 0) { + builder.field("rejected_execution_count", rejectedExecutionCount); + } + if (timeoutCount > 0) { + builder.field("timeout_count", timeoutCount); + } if (startTime != null) { builder.timeField("start_time", "start_time_string", startTime.toEpochMilli()); } @@ -180,6 +235,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_8_1_0)) { out.writeOptionalVInt(inferenceThreads); out.writeOptionalVInt(modelThreads); + out.writeVInt(errorCount); + out.writeVInt(rejectedExecutionCount); + out.writeVInt(timeoutCount); } } @@ -193,6 +251,9 @@ public boolean equals(Object o) { && Objects.equals(node, that.node) && Objects.equals(lastAccess, that.lastAccess) && Objects.equals(pendingCount, that.pendingCount) + && Objects.equals(errorCount, that.errorCount) + && Objects.equals(rejectedExecutionCount, that.rejectedExecutionCount) + && Objects.equals(timeoutCount, that.timeoutCount) && Objects.equals(routingState, that.routingState) && Objects.equals(startTime, that.startTime) && Objects.equals(inferenceThreads, that.inferenceThreads) @@ -207,6 +268,9 @@ public int hashCode() { avgInferenceTime, lastAccess, pendingCount, + errorCount, + rejectedExecutionCount, + timeoutCount, routingState, startTime, inferenceThreads, @@ -331,6 +395,28 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("allocation_status", allocationStatus); } builder.timeField("start_time", "start_time_string", startTime.toEpochMilli()); + + int totalErrorCount = nodeStats.stream().mapToInt(NodeStats::getErrorCount).sum(); + int totalRejectedExecutionCount = nodeStats.stream().mapToInt(NodeStats::getRejectedExecutionCount).sum(); + int totalTimeoutCount = nodeStats.stream().mapToInt(NodeStats::getTimeoutCount).sum(); + long totalInferenceCount = nodeStats.stream() + .filter(n -> n.getInferenceCount().isPresent()) + .mapToLong(n -> n.getInferenceCount().get()) + .sum(); + + if (totalErrorCount > 0) { + builder.field("error_count", totalErrorCount); + } + if (totalRejectedExecutionCount > 0) { + builder.field("rejected_execution_count", totalRejectedExecutionCount); + } + if (totalTimeoutCount > 0) { + builder.field("timeout_count", totalTimeoutCount); + } + if (totalInferenceCount > 0) { + builder.field("inference_count", totalInferenceCount); + } + builder.startArray("nodes"); for (AllocationStats.NodeStats nodeStat : nodeStats) { nodeStat.toXContent(builder, params); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java index 4a9fad2fea43b..62eac5a5eb6ae 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java @@ -126,6 +126,9 @@ protected Response mutateInstanceForVersion(Response instance, Version version) nodeStats.getAvgInferenceTime().orElse(null), nodeStats.getLastAccess(), nodeStats.getPendingCount(), + 0, + 0, + 0, nodeStats.getRoutingState(), nodeStats.getStartTime(), null, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/allocation/AllocationStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/allocation/AllocationStatsTests.java index 8e3d97c063715..e5d79b720b018 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/allocation/AllocationStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/allocation/AllocationStatsTests.java @@ -57,6 +57,9 @@ public static AllocationStats.NodeStats randomNodeStats(DiscoveryNode node) { randomNonNegativeLong(), randomBoolean() ? randomDoubleBetween(0.0, 100.0, true) : null, randomIntBetween(0, 100), + randomIntBetween(0, 100), + randomIntBetween(0, 100), + randomIntBetween(0, 100), Instant.now(), Instant.now(), randomIntBetween(1, 16), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index ff92706794659..a4170cf2ad010 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -297,9 +297,11 @@ protected void taskOperation( AllocationStats.NodeStats.forStartedState( clusterService.localNode(), stats.get().timingStats().getCount(), - // avoid reporting the average time as 0 if count < 1 - (stats.get().timingStats().getCount() > 0) ? 
stats.get().timingStats().getAverage() : null,
+                        stats.get().timingStats().getAverage(),
                         stats.get().pendingCount(),
+                        stats.get().errorCount(),
+                        stats.get().rejectedExecutionCount(),
+                        stats.get().timeoutCount(),
                         stats.get().lastUsed(),
                         stats.get().startTime(),
                         stats.get().inferenceThreads(),
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java
index a36ef25ff1608..b49b2a950dcce 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java
@@ -18,6 +18,7 @@
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.query.IdsQueryBuilder;
@@ -63,6 +64,7 @@
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Consumer;
 
@@ -101,25 +103,32 @@ public void startDeployment(TrainedModelDeploymentTask task, ActionListener<TrainedModelDeploymentTask> listener) {
 
     public Optional<ModelStats> getStats(TrainedModelDeploymentTask task) {
-        return Optional.ofNullable(processContextByAllocation.get(task.getId()))
-            .map(
-                processContext -> new ModelStats(
-                    processContext.startTime,
-                    processContext.getResultProcessor().getTimingStats(),
-                    processContext.getResultProcessor().getLastUsed(),
-                    processContext.executorService.queueSize() + processContext.getResultProcessor().numberOfPendingResults(),
-                    processContext.inferenceThreads,
-                    processContext.modelThreads
-                )
+        return Optional.ofNullable(processContextByAllocation.get(task.getId())).map(processContext -> {
+            var stats = processContext.getResultProcessor().getResultStats();
+            return new ModelStats(
+                processContext.startTime,
+                stats.timingStats(),
+                stats.lastUsed(),
+                processContext.executorService.queueSize() + stats.numberOfPendingResults(),
+                stats.errorCount(),
+                processContext.rejectedExecutionCount.intValue(),
+                processContext.timeoutCount.intValue(),
+                processContext.inferenceThreads,
+                processContext.modelThreads
             );
+        });
+    }
+
+    // function exposed for testing
+    ProcessContext addProcessContext(Long id, ProcessContext processContext) {
+        return processContextByAllocation.putIfAbsent(id, processContext);
+    }
 
     private void doStartDeployment(TrainedModelDeploymentTask task, ActionListener<TrainedModelDeploymentTask> finalListener) {
         logger.info("[{}] Starting model deployment", task.getModelId());
 
         ProcessContext processContext = new ProcessContext(task, executorServiceForProcess);
-
-        if (processContextByAllocation.putIfAbsent(task.getId(), processContext) != null) {
+        if (addProcessContext(task.getId(), processContext) != null) {
             finalListener.onFailure(
                 ExceptionsHelper.serverError("[{}] Could not create inference process as one already exists", task.getModelId())
             );
@@ -259,7 +268,10 @@ public void infer(
             listener
         );
         try {
-            processContext.executorService.execute(inferenceAction);
+            processContext.getExecutorService().execute(inferenceAction);
+        } catch (EsRejectedExecutionException e) {
+            processContext.getRejectedExecutionCount().incrementAndGet();
+            inferenceAction.onFailure(e);
         } catch (Exception e) {
             inferenceAction.onFailure(e);
         }
@@ -302,6 +314,7 @@ static class InferenceAction extends AbstractRunnable implements ActionListener<
 
         void onTimeout() {
             if (notified.compareAndSet(false, true)) {
+                processContext.getTimeoutCount().incrementAndGet();
                 processContext.getResultProcessor().ignoreResponseWithoutNotifying(String.valueOf(requestId));
                 listener.onFailure(
                     new ElasticsearchStatusException("timeout [{}] waiting for inference result", RestStatus.REQUEST_TIMEOUT, timeout)
@@ -435,6 +448,8 @@ class ProcessContext {
         private volatile Instant startTime;
         private volatile Integer inferenceThreads;
         private volatile Integer modelThreads;
+        private AtomicInteger rejectedExecutionCount = new AtomicInteger();
+        private AtomicInteger timeoutCount = new AtomicInteger();
 
         ProcessContext(TrainedModelDeploymentTask task, ExecutorService executorService) {
             this.task = Objects.requireNonNull(task);
@@ -492,5 +507,20 @@ void loadModel(TrainedModelLocation modelLocation, ActionListener liste
                 throw new IllegalStateException("unsupported trained model location [" + modelLocation.getClass().getSimpleName() + "]");
             }
         }
+
+        // accessor used for mocking in tests
+        AtomicInteger getTimeoutCount() {
+            return timeoutCount;
+        }
+
+        // accessor used for mocking in tests
+        ExecutorService getExecutorService() {
+            return executorService;
+        }
+
+        // accessor used for mocking in tests
+        AtomicInteger getRejectedExecutionCount() {
+            return rejectedExecutionCount;
+        }
     }
 }
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ModelStats.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ModelStats.java
index d22ddea8b5279..40280afd08b36 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ModelStats.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ModelStats.java
@@ -15,6 +15,9 @@ public record ModelStats(
     LongSummaryStatistics timingStats,
     Instant lastUsed,
     int pendingCount,
+    int errorCount,
+    int rejectedExecutionCount,
+    int timeoutCount,
     Integer inferenceThreads,
     Integer modelThreads
 ) {}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java
index bb178b9447746..498d42fc277a2 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java
@@ -25,6 +25,8 @@
 
 public class PyTorchResultProcessor {
 
+    public record ResultStats(LongSummaryStatistics timingStats, int errorCount, int numberOfPendingResults, Instant lastUsed) {}
+
     private static final Logger logger = LogManager.getLogger(PyTorchResultProcessor.class);
 
     private final ConcurrentMap<String, PendingResult> pendingResults = new ConcurrentHashMap<>();
@@ -33,6 +35,7 @@ public class PyTorchResultProcessor {
     private volatile boolean isStopping;
     private final LongSummaryStatistics timingStats;
     private final Consumer<ThreadSettings> threadSettingsConsumer;
+    private int errorCount;
     private Instant lastUsed;
 
     public PyTorchResultProcessor(String deploymentId, Consumer<ThreadSettings> threadSettingsConsumer) {
@@ -100,12 +103,7 @@ public void process(NativePyTorchProcess process) {
 
     private void
processInferenceResult(PyTorchInferenceResult inferenceResult) { logger.trace(() -> new ParameterizedMessage("[{}] Parsed result with id [{}]", deploymentId, inferenceResult.getRequestId())); - if (inferenceResult.isError() == false) { - synchronized (this) { - timingStats.accept(inferenceResult.getTimeMs()); - lastUsed = Instant.now(); - } - } + processResult(inferenceResult); PendingResult pendingResult = pendingResults.remove(inferenceResult.getRequestId()); if (pendingResult == null) { logger.debug(() -> new ParameterizedMessage("[{}] no pending result for [{}]", deploymentId, inferenceResult.getRequestId())); @@ -114,16 +112,22 @@ private void processInferenceResult(PyTorchInferenceResult inferenceResult) { } } - public synchronized LongSummaryStatistics getTimingStats() { - return new LongSummaryStatistics(timingStats.getCount(), timingStats.getMin(), timingStats.getMax(), timingStats.getSum()); + public synchronized ResultStats getResultStats() { + return new ResultStats( + new LongSummaryStatistics(timingStats.getCount(), timingStats.getMin(), timingStats.getMax(), timingStats.getSum()), + errorCount, + pendingResults.size(), + lastUsed + ); } - public synchronized Instant getLastUsed() { - return lastUsed; - } - - public int numberOfPendingResults() { - return pendingResults.size(); + private synchronized void processResult(PyTorchInferenceResult result) { + if (result.isError() == false) { + timingStats.accept(result.getTimeMs()); + lastUsed = Instant.now(); + } else { + errorCount++; + } } public void stop() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/ProcessWorkerExecutorService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/ProcessWorkerExecutorService.java index 8b1c202e667f6..90773fccdbc9c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/ProcessWorkerExecutorService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/ProcessWorkerExecutorService.java @@ -9,13 +9,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.rest.RestStatus; import java.util.ArrayList; import java.util.List; @@ -104,11 +102,7 @@ public synchronized void execute(Runnable command) { boolean added = queue.offer(contextHolder.preserveContext(command)); if (added == false) { - throw new ElasticsearchStatusException( - processName + " queue is full. Unable to execute command", - RestStatus.TOO_MANY_REQUESTS, - processName - ); + throw new EsRejectedExecutionException(processName + " queue is full. 
Unable to execute command", false); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java index 213a8c7a4a424..5c3637e463bf4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java @@ -378,6 +378,9 @@ public void testUsage() throws Exception { 5, 42.0, 0, + 1, + 2, + 3, Instant.now(), Instant.now(), randomIntBetween(1, 16), @@ -388,6 +391,9 @@ public void testUsage() throws Exception { 4, 50.0, 0, + 1, + 2, + 3, Instant.now(), Instant.now(), randomIntBetween(1, 16), @@ -485,6 +491,7 @@ public void testUsage() throws Exception { assertThat(source.getValue("jobs.opened.forecasts.total"), equalTo(11)); assertThat(source.getValue("jobs.opened.forecasts.forecasted_jobs"), equalTo(2)); + // TODO error_count here??? assertThat(source.getValue("inference.trained_models._all.count"), equalTo(4)); assertThat(source.getValue("inference.trained_models.model_size_bytes.min"), equalTo(100.0)); assertThat(source.getValue("inference.trained_models.model_size_bytes.max"), equalTo(300.0)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java index 61452b81b191c..b6c4a646db012 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java @@ -8,23 +8,33 @@ package org.elasticsearch.xpack.ml.inference.deployment; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.WarningInferenceResults; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfig; +import org.elasticsearch.xpack.ml.inference.pytorch.process.PyTorchProcessFactory; import org.elasticsearch.xpack.ml.inference.pytorch.process.PyTorchResultProcessor; import org.junit.After; import org.junit.Before; import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.xpack.ml.MachineLearning.JOB_COMMS_THREAD_POOL_NAME; import static org.elasticsearch.xpack.ml.MachineLearning.UTILITY_THREAD_POOL_NAME; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -61,9 +71,11 @@ public void shutdownThreadpool() { } public void 
testInferListenerOnlyCalledOnce() {
-        PyTorchResultProcessor resultProcessor = new PyTorchResultProcessor("1", threadSettings -> {});
         DeploymentManager.ProcessContext processContext = mock(DeploymentManager.ProcessContext.class);
+        PyTorchResultProcessor resultProcessor = new PyTorchResultProcessor("1", threadSettings -> {});
         when(processContext.getResultProcessor()).thenReturn(resultProcessor);
+        AtomicInteger timeoutCount = new AtomicInteger();
+        when(processContext.getTimeoutCount()).thenReturn(timeoutCount);
 
         ListenerCounter listener = new ListenerCounter();
         DeploymentManager.InferenceAction action = new DeploymentManager.InferenceAction(
@@ -105,6 +117,7 @@ public void testInferListenerOnlyCalledOnce() {
         }
         assertThat(listener.failureCounts, equalTo(1));
         assertThat(listener.responseCounts, equalTo(1));
+        assertThat(timeoutCount.intValue(), equalTo(1));
 
         action = new DeploymentManager.InferenceAction(
             "test-model",
@@ -127,6 +140,42 @@ public void testInferListenerOnlyCalledOnce() {
         assertThat(listener.responseCounts, equalTo(1));
     }
 
+    public void testRejectedExecution() {
+        TrainedModelDeploymentTask task = mock(TrainedModelDeploymentTask.class);
+        Long taskId = 1L;
+        when(task.getId()).thenReturn(taskId);
+        when(task.isStopped()).thenReturn(Boolean.FALSE);
+
+        DeploymentManager deploymentManager = new DeploymentManager(
+            mock(Client.class),
+            mock(NamedXContentRegistry.class),
+            tp,
+            mock(PyTorchProcessFactory.class)
+        );
+
+        ExecutorService executorService = mock(ExecutorService.class);
+        doThrow(new EsRejectedExecutionException("mock executor rejection")).when(executorService).execute(any(Runnable.class));
+
+        AtomicInteger rejectedCount = new AtomicInteger();
+
+        DeploymentManager.ProcessContext context = mock(DeploymentManager.ProcessContext.class);
+        PyTorchResultProcessor resultProcessor = new PyTorchResultProcessor("1", threadSettings -> {});
+        when(context.getResultProcessor()).thenReturn(resultProcessor);
+        when(context.getExecutorService()).thenReturn(executorService);
+        when(context.getRejectedExecutionCount()).thenReturn(rejectedCount);
+
+        deploymentManager.addProcessContext(taskId, context);
+        deploymentManager.infer(
+            task,
+            mock(InferenceConfig.class),
+            Map.of(),
+            TimeValue.timeValueMinutes(1),
+            ActionListener.wrap(result -> fail("unexpected success"), e -> assertThat(e, instanceOf(EsRejectedExecutionException.class)))
+        );
+
+        assertThat(rejectedCount.intValue(), equalTo(1));
+    }
+
     private static class ListenerCounter implements ActionListener<InferenceResults> {
         private int responseCounts;
         private int failureCounts;
 

From b610aeeabb6eb659f85f7eac4c5a6bb763c146b4 Mon Sep 17 00:00:00 2001
From: Benjamin Trent
Date: Thu, 27 Jan 2022 11:56:19 -0500
Subject: [PATCH 076/100] [ML] add new random_sampler aggregation for
 background sampling of documents (#81228)

This is a reincarnation of #53200

This commit adds a new `random_sampler` aggregation for randomly including
documents in the collected result.

API format is
```js
{
  "aggs": {
    "sampler": {
      "random_sampler": {
        "probability": 0.001, //the probability that a doc is included
        "seed": 42 // Optional seed for consistent results
      },
      "aggs": {
        "mean": {
          "avg": {
            "field": "value"
          }
        }
      }
    }
  }
}
```

The sampling skips `n` documents, where each skip length `n` is randomly drawn
from an optimized geometric distribution whose success probability is the
provided `probability`. Additionally, each shard queried will have a separate
random stream (even when the seed is provided). A sketch of the skip sampling
follows below.
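To make the skipping concrete, here is a small illustrative sampler. This is a sketch only, not the `FastGeometric` class the commit adds (which optimizes the draw further); it shows the underlying idea: skip lengths are drawn from a geometric distribution with success probability `p`, so each document is kept with probability approximately `p`.

```java
import java.util.SplittableRandom;

// Sketch: inverse-CDF sampling of Geometric(p) on {0, 1, 2, ...} for skip lengths.
final class GeometricSkipExample {
    private final double lnOneMinusP; // ln(1 - p), negative for p in (0, 1)
    private final SplittableRandom rng;

    GeometricSkipExample(double p, long seed) {
        if (p <= 0.0 || p >= 1.0) {
            throw new IllegalArgumentException("p must be in (0, 1)");
        }
        this.lnOneMinusP = Math.log(1.0 - p);
        this.rng = new SplittableRandom(seed); // one independent stream per shard in practice
    }

    /** Number of documents to skip before the next sampled document. */
    int nextSkip() {
        // floor(ln(1 - U) / ln(1 - p)) with U uniform in [0, 1) is Geometric(p)
        return (int) (Math.log(1.0 - rng.nextDouble()) / lnOneMinusP);
    }
}
```

Advancing the collector's doc-ID iterator by `nextSkip() + 1` on each step yields the per-document inclusion probability described above.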
One may consider `probability` as "percentage of documents matched", but that comparison is not exact as there is variability in the number of documents considered. Performance is greatly improved for many metrics and on larger datasets this improvement can be immense. --- rest-api-spec/build.gradle | 1 + .../search.aggregation/450_random_sampler.yml | 229 ++++++++++++++++++ server/build.gradle | 2 + .../elasticsearch/search/SearchModule.java | 30 +++ .../bucket/sampler/random/FastGeometric.java | 116 +++++++++ .../sampler/random/InternalRandomSampler.java | 68 ++++++ .../RandomSamplerAggregationBuilder.java | 175 +++++++++++++ .../random/RandomSamplerAggregator.java | 106 ++++++++ .../RandomSamplerAggregatorFactory.java | 69 ++++++ .../sampler/random/RandomSamplingQuery.java | 150 ++++++++++++ .../sampler/random/FastGeometricTests.java | 55 +++++ .../random/RandomDocIDSetIteratorTests.java | 46 ++++ .../RandomSamplerAggregationBuilderTests.java | 26 ++ .../random/RandomSamplerAggregatorTests.java | 57 +++++ .../pivot/TransformAggregations.java | 1 + 15 files changed, 1131 insertions(+) create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_random_sampler.yml create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/FastGeometric.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/InternalRandomSampler.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplingQuery.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/FastGeometricTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomDocIDSetIteratorTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilderTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index a7d118861ee87..71e735de95476 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -37,6 +37,7 @@ testClusters.configureEach { module ':modules:mapper-extras' if (BuildParams.isSnapshotBuild() == false) { systemProperty 'es.index_mode_feature_flag_registered', 'true' + systemProperty 'es.random_sampler_feature_flag_registered', 'true' } } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_random_sampler.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_random_sampler.yml new file mode 100644 index 0000000000000..2699ca14dd4c6 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_random_sampler.yml @@ -0,0 +1,229 @@ +setup: + - do: + indices.create: + index: data + body: + mappings: + properties: + product: + type: keyword + value: + type: long + + - do: + bulk: + index: data + 
refresh: true + body: | + {"index": {}} + {"product": "server", "value": 1} + {"index": {}} + {"product": "server", "value": 1} + {"index": {}} + {"product": "server", "value": 1} + {"index": {}} + {"product": "VCR", "value": 4} + {"index": {}} + {"product": "VCR", "value": 4} + {"index": {}} + {"product": "VCR", "value": 4} + +--- +"Test random_sampler aggregation with no filter": + - skip: + features: close_to + version: " - 8.0.99" + reason: added in 8.1.0 + - do: + search: + index: data + size: 0 + body: > + { + "aggs": { + "sampled": { + "random_sampler": { + "probability": 0.95 + }, + "aggs": { + "mean": { + "avg": { + "field": "value" + } + } + } + } + } + } + - close_to: { aggregations.sampled.mean.value: {value: 2.5, error: 1.0} } + +--- +"Test random_sampler aggregation with filter": + - skip: + version: " - 8.0.99" + reason: added in 8.1.0 + - do: + search: + index: data + size: 0 + body: > + { + "query": { + "bool": { + "filter": [ + {"term": {"product": "server"}} + ] + } + }, + "aggs": { + "sampled": { + "random_sampler": { + "probability": 0.95 + }, + "aggs": { + "mean": { + "avg": { + "field": "value" + } + } + } + } + } + } + - match: { aggregations.sampled.mean.value: 1.0 } + + - do: + search: + index: data + size: 0 + body: > + { + "query": { + "bool": { + "filter": [ + {"term": {"product": "VCR"}} + ] + } + }, + "aggs": { + "sampled": { + "random_sampler": { + "probability": 0.95 + }, + "aggs": { + "mean": { + "avg": { + "field": "value" + } + } + } + } + } + } + - match: { aggregations.sampled.mean.value: 4.0 } +--- +"Test random_sampler aggregation with poor settings": + - skip: + version: " - 8.0.99" + reason: added in 8.1.0 + - do: + catch: /\[probability\] must be between 0 and 1/ + search: + index: data + size: 0 + body: > + { + "aggs": { + "sampled": { + "random_sampler": { + "probability": 1.5 + }, + "aggs": { + "mean": { + "avg": { + "field": "value" + } + } + } + } + } + } + - do: + catch: /\[probability\] must be between 0 and 1/ + search: + index: data + size: 0 + body: > + { + "aggs": { + "sampled": { + "random_sampler": { + "probability": 0.0 + }, + "aggs": { + "mean": { + "avg": { + "field": "value" + } + } + } + } + } + } + + - do: + catch: /\[random_sampler\] aggregation \[sampled\] must have sub-aggregations/ + search: + index: data + size: 0 + body: > + { + "aggs": { + "sampled": { + "random_sampler": { + "probability": 0.2 + } + } + } + } + + - do: + catch: /\[random_sampler\] aggregation \[sampled\] does not support sampling \[cardinality\] aggregation \[unique\]/ + search: + index: data + size: 0 + body: > + { + "aggs": { + "sampled": { + "random_sampler": { + "probability": 0.1 + }, + "aggs": { "unique": {"cardinality": {"field": "product"}}} + } + } + } + + - do: + catch: /\[random_sampler\] aggregation \[sampled\] cannot have a parent aggregation/ + search: + index: data + size: 0 + body: > + { + "aggs": { + "terms": { + "terms": { + "field": "product" + }, + "aggs": { + "sampled": { + "random_sampler": { + "probability": 0.1 + }, + "aggs": { "max": {"max": {"field": "value"}}} + } + } + } + } + } diff --git a/server/build.gradle b/server/build.gradle index 6cfe6badaf5cc..53945a5a387cc 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -130,9 +130,11 @@ tasks.named("processResources").configure { if (BuildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' + systemProperty 'es.random_sampler_feature_flag_registered', 'true' } 
tasks.named("internalClusterTest").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' + systemProperty 'es.random_sampler_feature_flag_registered', 'true' } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index bbd247f7d0ae0..689e4292b7110 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -8,6 +8,7 @@ package org.elasticsearch.search; +import org.elasticsearch.Build; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.NamedRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -16,6 +17,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Booleans; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.IndexSettings; @@ -131,6 +133,8 @@ import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.sampler.UnmappedSampler; +import org.elasticsearch.search.aggregations.bucket.sampler.random.InternalRandomSampler; +import org.elasticsearch.search.aggregations.bucket.sampler.random.RandomSamplerAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms; import org.elasticsearch.search.aggregations.bucket.terms.LongRareTerms; import org.elasticsearch.search.aggregations.bucket.terms.LongTerms; @@ -283,6 +287,21 @@ public class SearchModule { Setting.Property.NodeScope ); + private static final Boolean RANDOM_SAMPLER_AGGREGATION_FLAG_REGISTERED; + + static { + final String property = System.getProperty("es.random_sampler_feature_flag_registered"); + if (Build.CURRENT.isSnapshot() && property != null) { + throw new IllegalArgumentException("es.random_sampler_feature_flag_registered is only supported in non-snapshot builds"); + } + RANDOM_SAMPLER_AGGREGATION_FLAG_REGISTERED = Booleans.parseBoolean(property, null); + } + + public static boolean randomSamplerAggEnabled() { + return Build.CURRENT.isSnapshot() + || (RANDOM_SAMPLER_AGGREGATION_FLAG_REGISTERED != null && RANDOM_SAMPLER_AGGREGATION_FLAG_REGISTERED); + } + private final Map highlighters; private final List fetchSubPhases = new ArrayList<>(); @@ -463,6 +482,17 @@ private ValuesSourceRegistry registerAggregations(List plugins) { ).addResultReader(InternalAdjacencyMatrix::new), builder ); + if (randomSamplerAggEnabled()) { + registerAggregation( + new AggregationSpec( + RandomSamplerAggregationBuilder.NAME, + RandomSamplerAggregationBuilder::new, + RandomSamplerAggregationBuilder.PARSER + ).addResultReader(InternalRandomSampler.NAME, InternalRandomSampler::new) + .setAggregatorRegistrar(s -> s.registerUsage(RandomSamplerAggregationBuilder.NAME)), + builder + ); + } registerAggregation( new AggregationSpec(SamplerAggregationBuilder.NAME, SamplerAggregationBuilder::new, SamplerAggregationBuilder::parse) .addResultReader(InternalSampler.NAME, InternalSampler::new) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/FastGeometric.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/FastGeometric.java new file mode 100644 
index 0000000000000..0af21d1e7d9cb --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/FastGeometric.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.aggregations.bucket.sampler.random; + +import java.util.Arrays; +import java.util.function.IntSupplier; + +/** + * Specialized class for randomly sampling values from the geometric distribution + */ +public class FastGeometric { + private static final float[] LOOKUP_TABLE_FOR_FAST_LOG_2; + private static final int BITS = 13; + private static final int SHIFT = 23 - BITS; + + // E[log_2(U[0,1])] = [x log(x) - x]_0^1 / log(2) = -1.0 / log(2). The difference + // between sum{ fastLog2((float)(j)) - log2RngRange } over all distinct integers + // with n bits and this is the bias that the approximate log function introduces. + private static final float[] BIASES = new float[] { -0.49294917f / 256.0f, 4.7501511f / 65536.0f, 31.557305f / 4294967296.0f }; + private static final int[] PERMITTED_BITS_PER_SAMPLE = new int[] { 8, 16, 32 }; + private static final float[] PERMITTED_SAMPLE_RANGES = new float[] { 256.0f, 65536.0f, 4294967296.0f }; + static { + // Notes: + // 1) The shift is the maximum mantissa / bins. + // 2) The exponent is set to 126, which is 0 in two's complement. + // 3) JVM float representation is [sign|exponent|mantissa]. + LOOKUP_TABLE_FOR_FAST_LOG_2 = new float[1 << BITS + 1]; + int dx = 1 << SHIFT; + int x = 0x3F800000 + (dx / 2); + float log2 = (float) Math.log(2.0); + for (int i = 0; i < LOOKUP_TABLE_FOR_FAST_LOG_2.length; i++) { + LOOKUP_TABLE_FOR_FAST_LOG_2[i] = (float) Math.log(Float.intBitsToFloat(x)) / log2; + x += dx; + } + } + + // see https://www1.icsi.berkeley.edu/pubs/techreports/TR-07-002.pdf. + // + // Note this is log base 2. + private static float fastLog2(float x) { + assert x >= 0f : "Error generating fast log as provided value was less than 0"; + int xBits = Float.floatToIntBits(x); + int log2x = ((xBits >>> 23) & 0xFF) - 127; + int xMantissa = xBits & 0x7FFFFF; + return LOOKUP_TABLE_FOR_FAST_LOG_2[xMantissa >>> SHIFT] + log2x; + } + + private final IntSupplier rng; + private int currentSample; + private final int[] currentSamples; + private final float norm; + private final float rngRange; + private final float log2RngRange; + private final float bias; + private final int mask; + private final int bitsPerSample; + private final int needToGenerate; + + /** + * @param randomGen random integer supplier + * @param p the probability of success, must be between (0.0-1.0) + */ + public FastGeometric(IntSupplier randomGen, double p) { + if (p <= 0.0 || p >= 1.0) { + throw new IllegalArgumentException("[p] must be between 0.0 and 1.0, exclusive, was [" + p + "]"); + } + this.rng = randomGen; + int bits = (int) (1.5 * Math.log(Math.log(0.01) / Math.log(1.0 - p) - 1.0) / Math.log(2.0)); + int pos = Arrays.binarySearch(PERMITTED_BITS_PER_SAMPLE, bits); + pos = pos < 0 ? 
Math.min(-pos - 1, PERMITTED_BITS_PER_SAMPLE.length - 1) : pos; + bitsPerSample = PERMITTED_BITS_PER_SAMPLE[pos]; + needToGenerate = 32 / bitsPerSample; + mask = (Integer.MAX_VALUE >>> (32 - bitsPerSample - 1)); + currentSample = needToGenerate - 1; + currentSamples = new int[needToGenerate]; + norm = (float) (Math.log(2.0) / Math.log(1.0 - p)); + rngRange = PERMITTED_SAMPLE_RANGES[pos]; + log2RngRange = fastLog2(PERMITTED_SAMPLE_RANGES[pos]); + bias = BIASES[pos]; + } + + /** + * @return The next random sample from the geometric distribution + */ + public int next() { + currentSample++; + // Branch prediction should be essentially perfect here so this should be free. + if (currentSample == needToGenerate) { + generate(); + currentSample = 0; + } + return currentSamples[currentSample] + 1; + } + + private void generate() { + int rn = rng.getAsInt(); + + // Branch prediction should be essentially perfect here so this should be free. + if (needToGenerate == 1) { + float u = 0.5f * rngRange + (float) (rn) + 0.5f; + currentSamples[0] = (int) (norm * (fastLog2(u) - log2RngRange)); + } else { + for (int i = 0, shift = bitsPerSample; shift <= 32; i++, shift += bitsPerSample) { + float u = (float) ((rn >>> shift) & mask) + 0.5f; + currentSamples[i] = (int) (norm * (fastLog2(u) - log2RngRange + bias)); + } + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/InternalRandomSampler.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/InternalRandomSampler.java new file mode 100644 index 0000000000000..15547dd2a9c8b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/InternalRandomSampler.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.aggregations.bucket.sampler.random; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.sampler.Sampler; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +public class InternalRandomSampler extends InternalSingleBucketAggregation implements Sampler { + public static final String NAME = "mapped_random_sampler"; + public static final String PARSER_NAME = "random_sampler"; + + private final int seed; + + InternalRandomSampler(String name, long docCount, int seed, InternalAggregations subAggregations, Map metadata) { + super(name, docCount, subAggregations, metadata); + this.seed = seed; + } + + /** + * Read from a stream. 
+ */ + public InternalRandomSampler(StreamInput in) throws IOException { + super(in); + this.seed = in.readInt(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + super.doWriteTo(out); + out.writeInt(seed); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public String getType() { + return PARSER_NAME; + } + + @Override + protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { + return new InternalRandomSampler(name, docCount, seed, subAggregations, metadata); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.field(RandomSamplerAggregationBuilder.SEED.getPreferredName(), seed); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount()); + getAggregations().toXContentInternal(builder, params); + return builder; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java new file mode 100644 index 0000000000000..e74dfab97b4ff --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.sampler.random; + +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Objects; +import java.util.function.Consumer; + +public class RandomSamplerAggregationBuilder extends AbstractAggregationBuilder { + + public static final String NAME = "random_sampler"; + + static final ParseField PROBABILITY = new ParseField("probability"); + static final ParseField SEED = new ParseField("seed"); + + public static final ObjectParser PARSER = ObjectParser.fromBuilder( + RandomSamplerAggregationBuilder.NAME, + RandomSamplerAggregationBuilder::new + ); + static { + PARSER.declareInt(RandomSamplerAggregationBuilder::setSeed, SEED); + PARSER.declareDouble(RandomSamplerAggregationBuilder::setProbability, PROBABILITY); + } + + public static RandomSamplerAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { + return PARSER.parse(parser, new RandomSamplerAggregationBuilder(aggregationName), null); + } + + private int seed = Randomness.get().nextInt(); + private double p = 0.1; + + RandomSamplerAggregationBuilder(String name) { + super(name); + } + + public RandomSamplerAggregationBuilder setProbability(double probability) { + if (probability <= 0 || probability >= 1) { + throw new IllegalArgumentException("[probability] must be between 0 and 1, exclusive, was [" + probability + "]"); + } + this.p = probability; + return this; + } + + public RandomSamplerAggregationBuilder setSeed(int seed) { + this.seed = seed; + return this; + } + + public RandomSamplerAggregationBuilder(StreamInput in) throws IOException { + super(in); + this.p = in.readDouble(); + this.seed = in.readInt(); + } + + protected RandomSamplerAggregationBuilder( + RandomSamplerAggregationBuilder clone, + AggregatorFactories.Builder factoriesBuilder, + Map metadata + ) { + super(clone, factoriesBuilder, metadata); + this.p = clone.p; + this.seed = clone.seed; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeDouble(p); + out.writeInt(seed); + } + + void recursivelyCheckSubAggs(Collection builders, Consumer aggregationCheck) { + if (builders == null || builders.isEmpty()) { + return; + } + for (AggregationBuilder b : builders) { + aggregationCheck.accept(b); + recursivelyCheckSubAggs(b.getSubAggregations(), aggregationCheck); + } + } + + @Override + protected AggregatorFactory doBuild( + AggregationContext context, + 
AggregatorFactory parent, + AggregatorFactories.Builder subfactoriesBuilder + ) throws IOException { + if (parent != null) { + throw new IllegalArgumentException("[random_sampler] aggregation [" + getName() + "] cannot have a parent aggregation"); + } + if (subfactoriesBuilder.getAggregatorFactories().isEmpty()) { + throw new IllegalArgumentException("[random_sampler] aggregation [" + getName() + "] must have sub-aggregations"); + } + recursivelyCheckSubAggs(subfactoriesBuilder.getAggregatorFactories(), builder -> { + // TODO add a method or interface to aggregation builder that defaults to false + if (builder instanceof CardinalityAggregationBuilder + || builder instanceof NestedAggregationBuilder + || builder instanceof SamplerAggregationBuilder + || builder instanceof DiversifiedAggregationBuilder) { + throw new IllegalArgumentException( + "[random_sampler] aggregation [" + + getName() + + "] does not support sampling [" + + builder.getType() + + "] aggregation [" + + builder.getName() + + "]" + ); + } + }); + return new RandomSamplerAggregatorFactory(name, seed, p, context, parent, subfactoriesBuilder, metadata); + } + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(PROBABILITY.getPreferredName(), p); + builder.field(SEED.getPreferredName(), seed); + builder.endObject(); + return null; + } + + @Override + protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map metadata) { + return new RandomSamplerAggregationBuilder(this, factoriesBuilder, metadata); + } + + @Override + public BucketCardinality bucketCardinality() { + return BucketCardinality.ONE; + } + + @Override + public String getType() { + return NAME; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), p, seed); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + if (super.equals(obj) == false) return false; + RandomSamplerAggregationBuilder other = (RandomSamplerAggregationBuilder) obj; + return Objects.equals(p, other.p) && Objects.equals(seed, other.seed); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java new file mode 100644 index 0000000000000..2c28d96648678 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.sampler.random; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.Bits; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.CardinalityUpperBound; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.support.AggregationContext; + +import java.io.IOException; +import java.util.Map; + +public class RandomSamplerAggregator extends BucketsAggregator implements SingleBucketAggregator { + + private final int seed; + private final CheckedSupplier weightSupplier; + + RandomSamplerAggregator( + String name, + int seed, + CheckedSupplier weightSupplier, + AggregatorFactories factories, + AggregationContext context, + Aggregator parent, + CardinalityUpperBound cardinalityUpperBound, + Map metadata + ) throws IOException { + super(name, factories, context, parent, cardinalityUpperBound, metadata); + this.seed = seed; + if (this.subAggregators().length == 0) { + throw new IllegalArgumentException( + RandomSamplerAggregationBuilder.NAME + " aggregation [" + name + "] must have sub aggregations configured" + ); + } + this.weightSupplier = weightSupplier; + } + + @Override + public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + return buildAggregationsForSingleBucket( + owningBucketOrds, + (owningBucketOrd, subAggregationResults) -> new InternalRandomSampler( + name, + bucketDocCount(owningBucketOrd), + seed, + subAggregationResults, + metadata() + ) + ); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + return new InternalRandomSampler(name, 0, seed, buildEmptySubAggregations(), metadata()); + } + + /** + * This is an optimized leaf collector that iterates the documents provided the {@link RandomSamplingQuery} directly. + * + * Instead of sampling in the foreground (i.e. iterating the documents as they are matched + * by the {@link RandomSamplerAggregator#topLevelQuery()}), iterating the document set returned by {@link RandomSamplingQuery} directly + * allows this aggregation to sample documents in the background. This provides a dramatic speed improvement, especially when a + * non-trivial {@link RandomSamplerAggregator#topLevelQuery()} is provided. 
+ * + * @param ctx reader context + * @param sub collector + * @return this always returns {@link LeafBucketCollector#NO_OP_COLLECTOR} + * @throws IOException when building the query or extracting docs fails + */ + @Override + protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + Scorer scorer = weightSupplier.get().scorer(ctx); + // This means there are no docs to iterate, possibly due to the fields not existing + if (scorer == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + final DocIdSetIterator docIt = scorer.iterator(); + final Bits liveDocs = ctx.reader().getLiveDocs(); + // Iterate every document provided by the scorer iterator + for (int docId = docIt.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docIt.nextDoc()) { + // If liveDocs is null, that means that every doc is a live doc, no need to check if it has been deleted or not + if (liveDocs == null || liveDocs.get(docIt.docID())) { + collectBucket(sub, docIt.docID(), 0); + } + } + // Since we have done our own collection, there is nothing for the leaf collector to do + return LeafBucketCollector.NO_OP_COLLECTOR; + } + +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java new file mode 100644 index 0000000000000..0776ad1f91698 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.aggregations.bucket.sampler.random; + +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Weight; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.CardinalityUpperBound; +import org.elasticsearch.search.aggregations.support.AggregationContext; + +import java.io.IOException; +import java.util.Map; + +public class RandomSamplerAggregatorFactory extends AggregatorFactory { + + private final int seed; + private final double probability; + private Weight weight; + + RandomSamplerAggregatorFactory( + String name, + int seed, + double probability, + AggregationContext context, + AggregatorFactory parent, + AggregatorFactories.Builder subFactories, + Map metadata + ) throws IOException { + super(name, context, parent, subFactories, metadata); + this.probability = probability; + this.seed = seed; + } + + @Override + public Aggregator createInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) + throws IOException { + return new RandomSamplerAggregator(name, seed, this::getWeight, factories, context, parent, cardinality, metadata); + } + + /** + * This creates the query weight which will be used in the aggregator. 
+ * + * This weight is a boolean query between {@link RandomSamplingQuery} and the configured top level query of the search. This allows + * the aggregation to iterate the documents directly, thus sampling in the background instead of the foreground. + * @return weight to be used, is cached for additional usages + * @throws IOException when building the weight or queries fails; + */ + private Weight getWeight() throws IOException { + if (weight == null) { + RandomSamplingQuery query = new RandomSamplingQuery(probability, seed, context.shardRandomSeed()); + BooleanQuery booleanQuery = new BooleanQuery.Builder().add(query, BooleanClause.Occur.FILTER) + .add(context.query(), BooleanClause.Occur.FILTER) + .build(); + weight = context.searcher().createWeight(context.searcher().rewrite(booleanQuery), ScoreMode.COMPLETE_NO_SCORES, 1f); + } + return weight; + } + +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplingQuery.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplingQuery.java new file mode 100644 index 0000000000000..8c1ecc2a715ff --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplingQuery.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.aggregations.bucket.sampler.random; + +import com.carrotsearch.hppc.BitMixer; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Objects; +import java.util.SplittableRandom; +import java.util.function.IntSupplier; + +/** + * A query that randomly matches documents with a user-provided probability within a geometric distribution + */ +public final class RandomSamplingQuery extends Query { + + private final double p; + private final SplittableRandom splittableRandom; + private final int seed; + private final int hash; + + /** + * @param p The sampling probability e.g. 
0.05 == 5% probability a document will match + * @param seed The seed from the builder + * @param hash A unique hash so that if the same seed is used between multiple queries, unique random number streams + * can be generated + */ + public RandomSamplingQuery(double p, int seed, int hash) { + if (p <= 0.0 || p >= 1.0) { + throw new IllegalArgumentException("RandomSampling probability must be between 0.0 and 1.0, was [" + p + "]"); + } + this.p = p; + this.seed = seed; + this.hash = hash; + this.splittableRandom = new SplittableRandom(BitMixer.mix(hash, seed)); + } + + @Override + public String toString(String field) { + return "RandomSamplingQuery{" + "p=" + p + ", seed=" + seed + ", hash=" + hash + '}'; + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + return new Weight(this) { + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return true; + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + final Scorer s = scorer(context); + final boolean exists = s.iterator().advance(doc) == doc; + if (exists) { + return Explanation.match(boost, getQuery().toString()); + } else { + return Explanation.noMatch(getQuery().toString() + " doesn't match id " + doc); + } + } + + @Override + public Scorer scorer(LeafReaderContext context) { + final SplittableRandom random = splittableRandom.split(); + int maxDoc = context.reader().maxDoc(); + return new ConstantScoreScorer( + this, + boost, + ScoreMode.COMPLETE_NO_SCORES, + new RandomSamplingIterator(maxDoc, p, random::nextInt) + ); + } + }; + } + + @Override + public void visit(QueryVisitor visitor) { + visitor.visitLeaf(this); + } + + /** + * A DocIDSetIter that skips a geometrically random number of documents + */ + static class RandomSamplingIterator extends DocIdSetIterator { + private final int maxDoc; + private final double p; + private final FastGeometric distribution; + private int doc = -1; + + RandomSamplingIterator(int maxDoc, double p, IntSupplier rng) { + this.maxDoc = maxDoc; + this.p = p; + this.distribution = new FastGeometric(rng, p); + } + + @Override + public int docID() { + return doc; + } + + @Override + public int nextDoc() { + return advance(doc + 1); + } + + @Override + public int advance(int target) { + while (doc < target && doc < maxDoc) { + doc += distribution.next(); + } + doc = doc < maxDoc ? doc : NO_MORE_DOCS; + return doc; + } + + @Override + public long cost() { + return (long) (maxDoc * p); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RandomSamplingQuery that = (RandomSamplingQuery) o; + return Double.compare(that.p, p) == 0 && seed == that.seed && hash == that.hash; + } + + @Override + public int hashCode() { + return Objects.hash(p, seed, hash); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/FastGeometricTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/FastGeometricTests.java new file mode 100644 index 0000000000000..082ca5c1ca688 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/FastGeometricTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.aggregations.bucket.sampler.random; + +import org.elasticsearch.test.ESTestCase; + +import java.util.SplittableRandom; + +import static org.hamcrest.Matchers.closeTo; + +public class FastGeometricTests extends ESTestCase { + + private static final int N = 10_000_000; + private static final double[] PROBABILITIES = new double[] { 0.5, 0.1, 0.01, 0.001, 0.0001, 0.00001 }; + + public void testGeometricSeries() { + for (double p : PROBABILITIES) { + final SplittableRandom rng = new SplittableRandom(randomLong()); + final int size = 32; + + double[] expected = new double[size]; + for (int k = 0; k < size; k++) { + expected[k] = Math.pow(1.0 - p, k) * p; + } + + FastGeometric geometric = new FastGeometric(rng::nextInt, p); + int[] counts = new int[size]; + double mean = 0.0; + for (int i = 0; i < N; ++i) { + int sample = (geometric.next() - 1); + if (sample < counts.length) { + counts[sample]++; + } + mean += sample; + } + double[] fractions = new double[counts.length]; + for (int i = 0; i < counts.length; ++i) { + fractions[i] = (double) (counts[i]) / (double) (N); + } + for (int i = 0; i < size; i++) { + assertThat("inaccurate geometric sampling at probability [" + p + "]", fractions[i], closeTo(expected[i], 1e-2)); + } + mean /= N; + double expectedMean = (1 - p) / p; + assertThat("biased mean value when sampling at probability [" + p + "]", mean, closeTo(expectedMean, 1.0 / (Math.pow(p, 0.5)))); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomDocIDSetIteratorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomDocIDSetIteratorTests.java new file mode 100644 index 0000000000000..a5b9a75281144 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomDocIDSetIteratorTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.aggregations.bucket.sampler.random; + +import org.apache.lucene.search.DocIdSetIterator; +import org.elasticsearch.test.ESTestCase; + +import java.util.SplittableRandom; + +public class RandomDocIDSetIteratorTests extends ESTestCase { + + public void testRandomSampler() { + int maxDoc = 10000; + SplittableRandom random = new SplittableRandom(randomInt()); + + for (int i = 1; i < 100; i++) { + double p = i / 100.0; + int count = 0; + RandomSamplingQuery.RandomSamplingIterator iter = new RandomSamplingQuery.RandomSamplingIterator(maxDoc, p, random::nextInt); + while (iter.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { + count += 1; + } + + double error = Math.abs((maxDoc * p) / count) / (maxDoc * p); + if (error > 0.05) { + fail( + "Hit count was [" + + count + + "], expected to be close to " + + maxDoc * p + + " (+/- 5% error). 
Error was " + + error + + ", p=" + + p + ); + } + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilderTests.java new file mode 100644 index 0000000000000..288399efdb4a2 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilderTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.aggregations.bucket.sampler.random; + +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.BaseAggregationTestCase; + +public class RandomSamplerAggregationBuilderTests extends BaseAggregationTestCase { + + @Override + protected RandomSamplerAggregationBuilder createTestAggregatorBuilder() { + RandomSamplerAggregationBuilder builder = new RandomSamplerAggregationBuilder(randomAlphaOfLength(10)); + if (randomBoolean()) { + builder.setSeed(randomInt()); + } + builder.subAggregation(AggregationBuilders.max(randomAlphaOfLength(10)).field(randomAlphaOfLength(10))); + return builder; + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java new file mode 100644 index 0000000000000..e85062e7eb59a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.search.aggregations.bucket.sampler.random;
+
+import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.AggregatorTestCase;
+import org.elasticsearch.search.aggregations.metrics.Avg;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThan;
+
+public class RandomSamplerAggregatorTests extends AggregatorTestCase {
+
+    private static final String NUMERIC_FIELD_NAME = "value";
+
+    public void testAggregationSampling() throws IOException {
+        testCase(
+            new RandomSamplerAggregationBuilder("my_agg").subAggregation(AggregationBuilders.avg("avg").field(NUMERIC_FIELD_NAME))
+                .setProbability(0.25),
+            new MatchAllDocsQuery(),
+            RandomSamplerAggregatorTests::writeTestDocs,
+            (InternalRandomSampler result) -> {
+                assertThat(result.getDocCount(), allOf(greaterThan(20L), lessThan(60L)));
+                Avg agg = result.getAggregations().get("avg");
+                assertThat(agg.getValue(), closeTo(2, 1));
+            },
+            longField(NUMERIC_FIELD_NAME)
+        );
+    }
+
+    private static void writeTestDocs(RandomIndexWriter w) throws IOException {
+        for (int i = 0; i < 50; i++) {
+            w.addDocument(List.of(new SortedNumericDocValuesField(NUMERIC_FIELD_NAME, 1)));
+        }
+        for (int i = 0; i < 50; i++) {
+            w.addDocument(List.of(new SortedNumericDocValuesField(NUMERIC_FIELD_NAME, 2)));
+        }
+        for (int i = 0; i < 25; i++) {
+            w.addDocument(List.of(new SortedNumericDocValuesField(NUMERIC_FIELD_NAME, 4)));
+        }
+    }
+
+}

diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/TransformAggregations.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/TransformAggregations.java
index 1b20af39fedb7..dd7c4fe1edd37 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/TransformAggregations.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/TransformAggregations.java
@@ -70,6 +70,7 @@ public final class TransformAggregations {
         "nested",
         "percentile_ranks",
         "range",
+        "random_sampler",
         "reverse_nested",
         "sampler",
         "significant_terms", // https://github.com/elastic/elasticsearch/issues/51073

From 124ef4b41cc4273807c826f7a4f7155f03dff1e9 Mon Sep 17 00:00:00 2001
From: Igor Motov
Date: Thu, 27 Jan 2022 07:17:26 -1000
Subject: [PATCH 077/100] Fix OOM in TimeSeriesAggregationsIT tests (#83189)

Reduce the maximum number of generated dimensions in order to prevent the test
from OOMing until we have a more robust mechanism of handling high-cardinality
fields.
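For context, the blow-up is multiplicative: the test's total number of distinct
time series is the product of `dimensions[i].length` across all dimensions. A
rough sketch of the arithmetic (the concrete numbers below are illustrative,
not taken from an actual test run):

```java
// Illustrative only: shows how per-dimension cardinality multiplies into the
// total number of distinct time series the test can generate.
public class DimCardinalitySketch {
    public static void main(String[] args) {
        int numberOfDimensions = 5;

        // The old bound divided by the number of metrics, not dimensions. With
        // a single metric, each of the 5 dimensions could hold up to 30 values:
        long oldWorstCase = (long) Math.pow(30, numberOfDimensions); // 24,300,000 combinations

        // The new bound ties the limit to the dimension count instead:
        int maxPerDimension = 20 / numberOfDimensions; // 4
        long newWorstCase = (long) Math.pow(maxPerDimension, numberOfDimensions); // 1,024 combinations

        System.out.println(oldWorstCase + " -> " + newWorstCase);
    }
}
```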
Closes #83187
---
 .../search/aggregations/TimeSeriesAggregationsIT.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/TimeSeriesAggregationsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/TimeSeriesAggregationsIT.java
index a94fa23530e68..a7901be7ec1be 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/TimeSeriesAggregationsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/TimeSeriesAggregationsIT.java
@@ -81,7 +81,9 @@ public void setupSuiteScopeCluster() throws Exception {
         dimensions = new String[numberOfDimensions][];
         int dimCardinality = 1;
         for (int i = 0; i < dimensions.length; i++) {
-            dimensions[i] = randomUnique(() -> randomAlphaOfLength(10), randomIntBetween(1, 30 / numberOfMetrics)).toArray(new String[0]);
+            dimensions[i] = randomUnique(() -> randomAlphaOfLength(10), randomIntBetween(1, 20 / numberOfDimensions)).toArray(
+                new String[0]
+            );
             dimCardinality *= dimensions[i].length;
         }

From 024d2fc042ec7f1aa962008ecb88ff847fc56118 Mon Sep 17 00:00:00 2001
From: Adam Locke
Date: Thu, 27 Jan 2022 12:38:16 -0500
Subject: [PATCH 078/100] [DOCS] Expand operator privileges explanation for
 users and roles (#82893)

* [DOCS] Expand operator privileges explanation for users and roles

* Clarify when to use the elastic user
---
 .../authentication/built-in-users.asciidoc | 26 +++++++++++++++----
 .../authorization/built-in-roles.asciidoc  |  7 +++--
 .../operator-privileges/index.asciidoc     | 12 ++++++---
 3 files changed, 34 insertions(+), 11 deletions(-)

diff --git a/x-pack/docs/en/security/authentication/built-in-users.asciidoc b/x-pack/docs/en/security/authentication/built-in-users.asciidoc
index 75ae6addc6e8f..5eb13e9d85827 100644
--- a/x-pack/docs/en/security/authentication/built-in-users.asciidoc
+++ b/x-pack/docs/en/security/authentication/built-in-users.asciidoc
@@ -7,7 +7,28 @@ up and running. These users have a fixed set of privileges and cannot be
 authenticated until their passwords have been set. The `elastic` user can be
 used to <>.

+.Create users with minimum privileges
+****
+The built-in users serve specific purposes and are not intended for general
+use. In particular, do not use the `elastic` superuser unless full access to
+the cluster is absolutely required. On self-managed deployments, use the
+`elastic` user to create users that have the minimum necessary roles or
+privileges for their activities.
+****
+
+[NOTE]
+====
+On {ecloud}, {ref}/operator-privileges.html[operator privileges] are enabled.
+These privileges restrict some infrastructure functionality, even if a role
+would otherwise permit a user to complete an administrative task.
+====
+
 `elastic`:: A built-in <>.
++
+IMPORTANT: Anyone who can log in as the `elastic` user has direct read-only
+access to restricted indices, such as `.security`. This user also has the ability
+to manage security and create roles with unlimited privileges.
+
 `kibana_system`:: The user Kibana uses to connect and communicate with {es}.
 `logstash_system`:: The user Logstash uses when storing monitoring information in {es}.
 `beats_system`:: The user the Beats use when storing monitoring information in {es}.
@@ -16,11 +37,6 @@ used to <>.
 storing monitoring information in {es}. It has the `remote_monitoring_agent` and
 `remote_monitoring_collector` built-in roles.
-TIP: The built-in users serve specific purposes and are not intended for general
-use. In particular, do not use the `elastic` superuser unless full access to
-the cluster is required. Instead, create users that have the minimum necessary
-roles or privileges for their activities.
-
 [discrete]
 [[built-in-user-explanation]]
 ==== How the built-in users work

diff --git a/x-pack/docs/en/security/authorization/built-in-roles.asciidoc b/x-pack/docs/en/security/authorization/built-in-roles.asciidoc
index 7a67dea5a8b83..415b6bdd00f44 100644
--- a/x-pack/docs/en/security/authorization/built-in-roles.asciidoc
+++ b/x-pack/docs/en/security/authorization/built-in-roles.asciidoc
@@ -170,9 +170,12 @@ to change index settings or to read or update data stream or index data.
 [[built-in-roles-superuser]]
 `superuser`::
 Grants full access to cluster management and data indices. This role also grants
 direct read-only access to restricted indices like `.security`. A user with the
-`superuser` role can <> any other user in the system.
+`superuser` role can <> any other user in the system.
++
+On {ecloud}, all standard users, including those with the `superuser` role, are
+restricted from performing <> actions.
++
+IMPORTANT: This role can manage security and create roles with unlimited privileges.
-NOTE: This role can manage security and create roles with unlimited privileges.
 Take extra care when assigning it to a user.

 [[built-in-roles-transform-admin]]
 `transform_admin`::

diff --git a/x-pack/docs/en/security/operator-privileges/index.asciidoc b/x-pack/docs/en/security/operator-privileges/index.asciidoc
index bb93c35110f32..efa47b685ce18 100644
--- a/x-pack/docs/en/security/operator-privileges/index.asciidoc
+++ b/x-pack/docs/en/security/operator-privileges/index.asciidoc
@@ -11,14 +11,18 @@ this environment. However, in more managed environments, such as
 {ess-trial}[{ess}], there is a distinction between the operator of the cluster
 infrastructure and the administrator of the cluster.

-Operator privileges limit some functionality to operator users only. Operator
-users are just regular Elasticsearch users with access to specific
+Operator privileges limit some functionality to operator users _only_. Operator
+users are just regular {es} users with access to specific
 <>. These privileges are not
 available to cluster administrators, even if they log in as a highly privileged
 user such as the `elastic` user or another user with the
-superuser role. By limiting system access, operator privileges enhance the
-Elasticsearch security model while safeguarding user capabilities.
+`superuser` role. By limiting system access, operator privileges enhance the
+{es} security model while safeguarding user capabilities.

+Operator privileges are enabled on {ecloud}, which means that some
+infrastructure management functionality is restricted and cannot be accessed by
+your administrative users. This capability protects your cluster from unintended
+infrastructure changes.

 include::configure-operator-privileges.asciidoc[]

From e182067f1fa6dd1e716c9a146177da652f6ce65b Mon Sep 17 00:00:00 2001
From: Adam Locke
Date: Thu, 27 Jan 2022 13:45:30 -0500
Subject: [PATCH 079/100] Clarify commands shown for "permanently" setting
 max_map_count (#82345) (#83227)

* Clarify commands shown for "permanently" setting max_map_count

The current text shows a grep, which of course does not set the value at all.
I've clarified why it's offered (to CHECK the value).
I've also moved the command for setting the value temporarily above that
discussion, to make it clearer to readers that that's what it does.

* Reorder commands, add headings, and fix typos

Co-authored-by: Adam Locke
Co-authored-by: Charlie Arehart
---
 docs/reference/setup/install/docker.asciidoc | 35 +++++++++----------
 1 file changed, 16 insertions(+), 19 deletions(-)

diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc
index 713298018f255..43375b63c6619 100644
--- a/docs/reference/setup/install/docker.asciidoc
+++ b/docs/reference/setup/install/docker.asciidoc
@@ -356,12 +356,12 @@ The following requirements and recommendations apply when running {es} in Docker
 The `vm.max_map_count` kernel setting must be set to at least `262144` for
 production use.

-How you set `vm.max_map_count` depends on your platform:
+How you set `vm.max_map_count` depends on your platform.
+
+====== Linux
+
+To view the current value for the `vm.max_map_count` setting, run:

-* Linux
-+
---
-The `vm.max_map_count` setting should be set permanently in `/etc/sysctl.conf`:
 [source,sh]
 --------------------------------------------
 grep vm.max_map_count /etc/sysctl.conf
 vm.max_map_count=262144
 --------------------------------------------

 To apply the setting on a live system, run:

 [source,sh]
 --------------------------------------------
 sysctl -w vm.max_map_count=262144
 --------------------------------------------
---

-* macOS with https://docs.docker.com/docker-for-mac[Docker for Mac]
-+
---
+To permanently change the value for the `vm.max_map_count` setting, update the
+value in `/etc/sysctl.conf`.
+
+====== macOS with https://docs.docker.com/docker-for-mac[Docker for Mac]
+
 The `vm.max_map_count` setting must be set within the xhyve virtual machine:

 . From the command line, run:
 +
 [source,sh]
 --------------------------------------------
 screen ~/Library/Containers/com.docker.docker/Data/vms/0/tty
 --------------------------------------------

 . Press enter and use `sysctl` to configure `vm.max_map_count`:
 +
 [source,sh]
 --------------------------------------------
 sysctl -w vm.max_map_count=262144
 --------------------------------------------

 . To exit the `screen` session, type `Ctrl a d`.
--- -* Windows and macOS with https://www.docker.com/products/docker-desktop[Docker Desktop] -+ --- +====== Windows and macOS with https://www.docker.com/products/docker-desktop[Docker Desktop] + The `vm.max_map_count` setting must be set via docker-machine: [source,sh] @@ -408,11 +407,9 @@ The `vm.max_map_count` setting must be set via docker-machine: docker-machine ssh sudo sysctl -w vm.max_map_count=262144 -------------------------------------------- --- -* Windows with https://docs.docker.com/docker-for-windows/wsl[Docker Desktop WSL 2 backend] -+ --- +====== Windows with https://docs.docker.com/docker-for-windows/wsl[Docker Desktop WSL 2 backend] + The `vm.max_map_count` setting must be set in the docker-desktop container: [source,sh] @@ -420,7 +417,7 @@ The `vm.max_map_count` setting must be set in the docker-desktop container: wsl -d docker-desktop sysctl -w vm.max_map_count=262144 -------------------------------------------- --- + ===== Configuration files must be readable by the `elasticsearch` user From 51c1f27b94d2e8bb55a76b07027a535267f895d6 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 27 Jan 2022 19:57:00 +0100 Subject: [PATCH 080/100] Make AbstractDiffable an interface (#83002) * AbstractDiffable doesn't have any state, so it doesn't have to be an abstract class. We represent it as an interface which support default methods since Java 8. The advantage of such refactoring is to allow the classes which extend AbstractDiffable to be represented as Java records. * Rename it SimpleDiffable --- .../http/ClusterStateRestCancellationIT.java | 4 ++-- .../action/admin/indices/rollover/RolloverInfo.java | 6 +++--- .../{AbstractDiffable.java => SimpleDiffable.java} | 12 ++++++------ .../elasticsearch/cluster/block/ClusterBlocks.java | 6 +++--- .../cluster/metadata/AliasMetadata.java | 6 +++--- .../cluster/metadata/ComponentTemplate.java | 6 +++--- .../cluster/metadata/ComposableIndexTemplate.java | 6 +++--- .../elasticsearch/cluster/metadata/DataStream.java | 6 +++--- .../cluster/metadata/DataStreamAlias.java | 6 +++--- .../cluster/metadata/IndexTemplateMetadata.java | 6 +++--- .../cluster/metadata/MappingMetadata.java | 6 +++--- .../cluster/metadata/NodesShutdownMetadata.java | 4 ++-- .../cluster/metadata/SingleNodeShutdownMetadata.java | 8 ++------ .../org/elasticsearch/cluster/metadata/Template.java | 4 ++-- .../elasticsearch/cluster/node/DiscoveryNodes.java | 6 +++--- .../cluster/routing/IndexRoutingTable.java | 6 +++--- .../elasticsearch/ingest/PipelineConfiguration.java | 6 +++--- .../org/elasticsearch/script/StoredScriptSource.java | 6 +++--- .../upgrades/FeatureMigrationResults.java | 4 ++-- .../upgrades/SingleFeatureMigrationResult.java | 4 ++-- .../cluster/coordination/CoordinatorTests.java | 4 ++-- .../cluster/coordination/JoinHelperTests.java | 4 ++-- .../cluster/metadata/ComponentTemplateTests.java | 4 ++-- .../metadata/ComposableIndexTemplateTests.java | 4 ++-- .../cluster/metadata/NodesShutdownMetadataTests.java | 4 ++-- .../cluster/serialization/DiffableTests.java | 8 ++++---- .../PersistentTasksCustomMetadataTests.java | 4 ++-- .../RepositoriesMetadataSerializationTests.java | 4 ++-- .../SnapshotsInProgressSerializationTests.java | 4 ++-- .../upgrades/FeatureMigrationResultsTests.java | 4 ++-- ...java => SimpleDiffableSerializationTestCase.java} | 2 +- ... 
=> SimpleDiffableWireSerializationTestCase.java} | 2 +- .../xpack/autoscaling/AutoscalingMetadata.java | 4 ++-- .../xpack/autoscaling/policy/AutoscalingPolicy.java | 5 ++--- .../policy/AutoscalingPolicyMetadata.java | 8 ++------ ...utoscalingMetadataDiffableSerializationTests.java | 4 ++-- ...lingPolicyMetadataDiffableSerializationTests.java | 8 ++++---- .../xpack/core/action/GetDataStreamAction.java | 4 ++-- .../xpack/core/ilm/IndexLifecycleMetadata.java | 4 ++-- .../xpack/core/ilm/LifecyclePolicy.java | 5 ++--- .../xpack/core/ilm/LifecyclePolicyMetadata.java | 8 ++------ .../org/elasticsearch/xpack/core/ml/MlMetadata.java | 6 +++--- .../xpack/core/ml/datafeed/DatafeedConfig.java | 4 ++-- .../inference/allocation/TrainedModelAllocation.java | 8 ++------ .../elasticsearch/xpack/core/ml/job/config/Job.java | 4 ++-- .../xpack/core/rollup/job/RollupJob.java | 6 +++--- .../xpack/core/slm/SnapshotInvocationRecord.java | 9 ++------- .../xpack/core/slm/SnapshotLifecycleMetadata.java | 4 ++-- .../xpack/core/slm/SnapshotLifecyclePolicy.java | 9 ++------- .../core/slm/SnapshotLifecyclePolicyMetadata.java | 8 ++------ .../xpack/core/transform/transforms/QueryConfig.java | 4 ++-- .../core/transform/transforms/TransformConfig.java | 4 ++-- .../transforms/TransformDestIndexSettings.java | 4 ++-- .../transform/transforms/TransformTaskParams.java | 4 ++-- .../transform/transforms/pivot/ScriptConfig.java | 4 ++-- .../xpack/core/rollup/job/RollupJobTests.java | 4 ++-- .../xpack/ilm/IndexLifecycleMetadataTests.java | 4 ++-- .../xpack/ml/inference/ModelAliasMetadata.java | 6 +++--- .../allocation/TrainedModelAllocationMetadata.java | 4 ++-- 59 files changed, 140 insertions(+), 172 deletions(-) rename server/src/main/java/org/elasticsearch/cluster/{AbstractDiffable.java => SimpleDiffable.java} (78%) rename test/framework/src/main/java/org/elasticsearch/test/{AbstractDiffableSerializationTestCase.java => SimpleDiffableSerializationTestCase.java} (90%) rename test/framework/src/main/java/org/elasticsearch/test/{AbstractDiffableWireSerializationTestCase.java => SimpleDiffableWireSerializationTestCase.java} (90%) diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java index 09038d3b6e4b4..0b1bf626f2d4e 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java @@ -15,10 +15,10 @@ import org.elasticsearch.client.Cancellable; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamOutput; @@ -96,7 +96,7 @@ public void testClusterStateRestCancellation() throws Exception { updateClusterState(clusterService, s -> ClusterState.builder(s).removeCustom(AssertingCustom.NAME).build()); } - private static class AssertingCustom extends AbstractDiffable implements ClusterState.Custom { + private static class AssertingCustom implements SimpleDiffable, ClusterState.Custom { static final String 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java
index 0ee0e706c2d6d..710794cf9c87b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java
@@ -8,8 +8,8 @@
 
 package org.elasticsearch.action.admin.indices.rollover;
 
-import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -27,7 +27,7 @@
 /**
  * Class for holding Rollover related information within an index
  */
-public class RolloverInfo extends AbstractDiffable<RolloverInfo> implements Writeable, ToXContentFragment {
+public class RolloverInfo implements SimpleDiffable<RolloverInfo>, Writeable, ToXContentFragment {
 
     public static final ParseField CONDITION_FIELD = new ParseField("met_conditions");
     public static final ParseField TIME_FIELD = new ParseField("time");
@@ -81,7 +81,7 @@ public long getTime() {
     }
 
     public static Diff<RolloverInfo> readDiffFrom(StreamInput in) throws IOException {
-        return readDiffFrom(RolloverInfo::new, in);
+        return SimpleDiffable.readDiffFrom(RolloverInfo::new, in);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java b/server/src/main/java/org/elasticsearch/cluster/SimpleDiffable.java
similarity index 78%
rename from server/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java
rename to server/src/main/java/org/elasticsearch/cluster/SimpleDiffable.java
index 03ae3e32dcf2b..ce4c69d73561c 100644
--- a/server/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java
+++ b/server/src/main/java/org/elasticsearch/cluster/SimpleDiffable.java
@@ -15,16 +15,16 @@
 import java.io.IOException;
 
 /**
- * Abstract diffable object with simple diffs implementation that sends the entire object if object has changed or
+ * Simple diffable object with simple diffs implementation that sends the entire object if object has changed or
  * nothing if object remained the same.
  */
-public abstract class AbstractDiffable<T extends Diffable<T>> implements Diffable<T> {
+public interface SimpleDiffable<T extends Diffable<T>> extends Diffable<T> {
 
-    private static final Diff<?> EMPTY = new CompleteDiff<>();
+    Diff<?> EMPTY = new CompleteDiff<>();
 
     @SuppressWarnings("unchecked")
     @Override
-    public Diff<T> diff(T previousState) {
+    default Diff<T> diff(T previousState) {
         if (this.equals(previousState)) {
             return (Diff<T>) EMPTY;
         } else {
@@ -33,14 +33,14 @@ public Diff<T> diff(T previousState) {
     }
 
     @SuppressWarnings("unchecked")
-    public static <T extends Diffable<T>> Diff<T> readDiffFrom(Reader<T> reader, StreamInput in) throws IOException {
+    static <T extends Diffable<T>> Diff<T> readDiffFrom(Reader<T> reader, StreamInput in) throws IOException {
         if (in.readBoolean()) {
             return new CompleteDiff<>(reader.read(in));
         }
         return (Diff<T>) EMPTY;
     }
 
-    private static class CompleteDiff<T extends Diffable<T>> implements Diff<T> {
+    class CompleteDiff<T extends Diffable<T>> implements Diff<T> {
 
         @Nullable
         private final T part;
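To make the shape of the migration concrete, here is a minimal sketch of an implementer before and after the change. ExampleMetadata is hypothetical and not part of this patch; the sketch only assumes the Elasticsearch server classes shown above on the classpath.

    // Hypothetical example (not from this patch). Before, the diff machinery was
    // inherited from an abstract base class, occupying the single superclass slot:
    //
    //     public class ExampleMetadata extends AbstractDiffable<ExampleMetadata> implements Writeable { ... }
    //
    // After, the same behavior comes from default/static interface methods:

    import org.elasticsearch.cluster.Diff;
    import org.elasticsearch.cluster.SimpleDiffable;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    public class ExampleMetadata implements SimpleDiffable<ExampleMetadata>, Writeable {

        private final String name;

        public ExampleMetadata(String name) {
            this.name = name;
        }

        public ExampleMetadata(StreamInput in) throws IOException {
            this.name = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);
        }

        // The static helper must now be qualified with the interface name,
        // because static interface methods are not inherited (see below).
        public static Diff<ExampleMetadata> readDiffFrom(StreamInput in) throws IOException {
            return SimpleDiffable.readDiffFrom(ExampleMetadata::new, in);
        }

        // equals/hashCode drive the "send nothing if unchanged" optimization in
        // SimpleDiffable.diff(), so they must cover all serialized state.
        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            return name.equals(((ExampleMetadata) o).name);
        }

        @Override
        public int hashCode() {
            return name.hashCode();
        }
    }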
diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
index 6a187b4d23fd6..fbbf6ed2391c4 100644
--- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
+++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
@@ -8,8 +8,8 @@
 
 package org.elasticsearch.cluster.block;
 
-import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.MetadataIndexStateService;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -35,7 +35,7 @@
 /**
  * Represents current cluster level blocks to block dirty operations done against the cluster.
  */
-public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
+public class ClusterBlocks implements SimpleDiffable<ClusterBlocks> {
 
     public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), ImmutableOpenMap.of());
 
     private final Set<ClusterBlock> global;
@@ -291,7 +291,7 @@ private static Set<ClusterBlock> readBlockSet(StreamInput in) throws IOException
     }
 
     public static Diff<ClusterBlocks> readDiffFrom(StreamInput in) throws IOException {
-        return AbstractDiffable.readDiffFrom(ClusterBlocks::readFrom, in);
+        return SimpleDiffable.readDiffFrom(ClusterBlocks::readFrom, in);
     }
 
     record ImmutableLevelHolder(Set<ClusterBlock> global, ImmutableOpenMap<String, Set<ClusterBlock>> indices) {}
 
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java
index 24884e80b4646..c7d68a50c6da5 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java
@@ -9,8 +9,8 @@
 package org.elasticsearch.cluster.metadata;
 
 import org.elasticsearch.ElasticsearchGenerationException;
-import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -33,7 +33,7 @@
 
 import static java.util.Collections.emptySet;
 
-public class AliasMetadata extends AbstractDiffable<AliasMetadata> implements ToXContentFragment {
+public class AliasMetadata implements SimpleDiffable<AliasMetadata>, ToXContentFragment {
 
     private final String alias;
 
@@ -223,7 +223,7 @@ public AliasMetadata(StreamInput in) throws IOException {
     }
 
     public static Diff<AliasMetadata> readDiffFrom(StreamInput in) throws IOException {
-        return readDiffFrom(AliasMetadata::new, in);
+        return SimpleDiffable.readDiffFrom(AliasMetadata::new, in);
     }
 
     @Override
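The wire contract these call sites rely on is unchanged: diff() writes either a "changed" marker followed by the whole object, or a bare "unchanged" marker that CompleteDiff turns back into the previous instance on apply(). A sketch of the round trip, reusing the hypothetical ExampleMetadata from above:

    import org.elasticsearch.cluster.Diff;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;

    import java.io.IOException;

    public class ExampleDiffRoundTrip {
        public static void main(String[] args) throws IOException {
            ExampleMetadata previous = new ExampleMetadata("a");
            ExampleMetadata current = new ExampleMetadata("b");

            // Objects differ, so the diff carries the complete new object.
            Diff<ExampleMetadata> diff = current.diff(previous);

            try (BytesStreamOutput out = new BytesStreamOutput()) {
                diff.writeTo(out);
                try (StreamInput in = out.bytes().streamInput()) {
                    ExampleMetadata applied = ExampleMetadata.readDiffFrom(in).apply(previous);
                    assert applied.equals(current);
                }
            }

            // When nothing changed, the shared EMPTY diff is returned and
            // apply() simply hands back the previous instance.
            assert current.diff(current).apply(current) == current;
        }
    }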
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java
index 6829f9e5bd154..b0bdc9fe8161e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java
@@ -8,8 +8,8 @@
 
 package org.elasticsearch.cluster.metadata;
 
-import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -30,7 +30,7 @@
  * contains a field "foo", it's expected to contain all the necessary settings/mappings/etc for the
  * "foo" field. These component templates make up the individual pieces composing an index template.
  */
-public class ComponentTemplate extends AbstractDiffable<ComponentTemplate> implements ToXContentObject {
+public class ComponentTemplate implements SimpleDiffable<ComponentTemplate>, ToXContentObject {
     private static final ParseField TEMPLATE = new ParseField("template");
     private static final ParseField VERSION = new ParseField("version");
     private static final ParseField METADATA = new ParseField("_meta");
@@ -55,7 +55,7 @@ public class ComponentTemplate extends AbstractDiffable<ComponentTemplate> imple
     private final Map<String, Object> metadata;
 
     static Diff<ComponentTemplate> readComponentTemplateDiffFrom(StreamInput in) throws IOException {
-        return AbstractDiffable.readDiffFrom(ComponentTemplate::new, in);
+        return SimpleDiffable.readDiffFrom(ComponentTemplate::new, in);
     }
 
     public static ComponentTemplate parse(XContentParser parser) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
index cd6edd507dc95..0c34449fdea20 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
@@ -9,8 +9,8 @@
 package org.elasticsearch.cluster.metadata;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -38,7 +38,7 @@
 * ids corresponding to component templates that should be composed in order when creating a new
 * index.
 */
-public class ComposableIndexTemplate extends AbstractDiffable<ComposableIndexTemplate> implements ToXContentObject {
+public class ComposableIndexTemplate implements SimpleDiffable<ComposableIndexTemplate>, ToXContentObject {
    private static final ParseField INDEX_PATTERNS = new ParseField("index_patterns");
    private static final ParseField TEMPLATE = new ParseField("template");
    private static final ParseField PRIORITY = new ParseField("priority");
@@ -92,7 +92,7 @@ public class ComposableIndexTemplate extends AbstractDiffable<ComposableIndexTem
 
     static Diff<ComposableIndexTemplate> readITV2DiffFrom(StreamInput in) throws IOException {
-        return AbstractDiffable.readDiffFrom(ComposableIndexTemplate::new, in);
+        return SimpleDiffable.readDiffFrom(ComposableIndexTemplate::new, in);
     }
 
     public static ComposableIndexTemplate parse(XContentParser parser) throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
index 4e830cd95a0ae..309fbe647928b 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
@@ -12,8 +12,8 @@
 import org.apache.lucene.index.PointValues;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
-import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -46,7 +46,7 @@
 import java.util.function.LongSupplier;
 import java.util.stream.Collectors;
 
-public final class DataStream extends AbstractDiffable<DataStream> implements ToXContentObject {
+public final class DataStream implements SimpleDiffable<DataStream>, ToXContentObject {
 
     public static final String BACKING_INDEX_PREFIX = ".ds-";
     public static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd");
@@ -558,7 +558,7 @@ public DataStream(StreamInput in) throws IOException {
     }
 
     public static Diff<DataStream> readDiffFrom(StreamInput in) throws IOException {
-        return readDiffFrom(DataStream::new, in);
+        return SimpleDiffable.readDiffFrom(DataStream::new, in);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java
index 64f725294ce7b..82c419c448143 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java
@@ -7,8 +7,8 @@
  */
 package org.elasticsearch.cluster.metadata;
 
-import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -33,7 +33,7 @@
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
 
-public class DataStreamAlias extends AbstractDiffable<DataStreamAlias> implements ToXContentFragment {
+public class DataStreamAlias implements SimpleDiffable<DataStreamAlias>, ToXContentFragment {
 
     public static final ParseField DATA_STREAMS_FIELD = new ParseField("data_streams");
     public static final ParseField WRITE_DATA_STREAM_FIELD = new ParseField("write_data_stream");
@@ -278,7 +278,7 @@ public DataStreamAlias restore(DataStreamAlias previous, String renamePattern, S
     }
 
     public static Diff<DataStreamAlias> readDiffFrom(StreamInput in) throws IOException {
-        return readDiffFrom(DataStreamAlias::new, in);
+        return SimpleDiffable.readDiffFrom(DataStreamAlias::new, in);
     }
 
     public static DataStreamAlias fromXContent(XContentParser parser) throws IOException {
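Note why every unqualified readDiffFrom(DataStream::new, in) call above had to become SimpleDiffable.readDiffFrom(...): static methods of a superclass are inherited by subclasses, but static methods of an interface are not inherited by implementers. A self-contained illustration of the language rule, in plain Java with no Elasticsearch types:

    // Minimal illustration of the Java rule that forces the qualified calls above.
    interface Base {
        static String greet() {
            return "hello";
        }
    }

    abstract class AbstractBase {
        static String greet() {
            return "hello";
        }
    }

    class FromClass extends AbstractBase {
        String call() {
            return greet(); // compiles: static methods of a superclass are inherited
        }
    }

    class FromInterface implements Base {
        String call() {
            // return greet();   // does not compile: interface statics are not inherited
            return Base.greet(); // must be qualified with the interface name
        }
    }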
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java
index 0d5b2c4fd741a..57f249e719920 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java
@@ -8,8 +8,8 @@
 package org.elasticsearch.cluster.metadata;
 
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.collect.MapBuilder;
@@ -39,7 +39,7 @@
 import static org.elasticsearch.core.RestApiVersion.V_8;
 import static org.elasticsearch.core.RestApiVersion.onOrAfter;
 
-public class IndexTemplateMetadata extends AbstractDiffable<IndexTemplateMetadata> {
+public class IndexTemplateMetadata implements SimpleDiffable<IndexTemplateMetadata> {
 
     private final String name;
 
@@ -199,7 +199,7 @@ public static IndexTemplateMetadata readFrom(StreamInput in) throws IOException
     }
 
     public static Diff<IndexTemplateMetadata> readDiffFrom(StreamInput in) throws IOException {
-        return readDiffFrom(IndexTemplateMetadata::readFrom, in);
+        return SimpleDiffable.readDiffFrom(IndexTemplateMetadata::readFrom, in);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java
index ebd440dfc6c5c..460a0edc320a2 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java
@@ -10,8 +10,8 @@
 
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.Version;
-import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -30,7 +30,7 @@
 /**
  * Mapping configuration for a type.
  */
-public class MappingMetadata extends AbstractDiffable<MappingMetadata> {
+public class MappingMetadata implements SimpleDiffable<MappingMetadata> {
 
     public static final MappingMetadata EMPTY_MAPPINGS = new MappingMetadata(
         MapperService.SINGLE_MAPPING_NAME,
@@ -193,6 +193,6 @@ public MappingMetadata(StreamInput in) throws IOException {
     }
 
     public static Diff<MappingMetadata> readDiffFrom(StreamInput in) throws IOException {
-        return readDiffFrom(MappingMetadata::new, in);
+        return SimpleDiffable.readDiffFrom(MappingMetadata::new, in);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java
index bbbbff441331d..46487b30b8cbd 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java
@@ -9,11 +9,11 @@
 package org.elasticsearch.cluster.metadata;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.NamedDiff;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.ConstructingObjectParser;
@@ -193,7 +193,7 @@ public void writeTo(StreamOutput out) throws IOException {
     }
 
     static Diff<SingleNodeShutdownMetadata> readNodesDiffFrom(StreamInput in) throws IOException {
-        return AbstractDiffable.readDiffFrom(SingleNodeShutdownMetadata::new, in);
+        return SimpleDiffable.readDiffFrom(SingleNodeShutdownMetadata::new, in);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java
index 013b6e7d764fe..cfe20ea30967a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java
@@ -10,8 +10,7 @@
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.Version;
-import org.elasticsearch.cluster.AbstractDiffable;
-import org.elasticsearch.cluster.Diffable;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -31,10 +30,7 @@
 /**
  * Contains data about a single node's shutdown readiness.
 */
-public class SingleNodeShutdownMetadata extends AbstractDiffable<SingleNodeShutdownMetadata>
-    implements
-        ToXContentObject,
-        Diffable<SingleNodeShutdownMetadata> {
+public class SingleNodeShutdownMetadata implements SimpleDiffable<SingleNodeShutdownMetadata>, ToXContentObject {
 
     public static final Version REPLACE_SHUTDOWN_TYPE_ADDED_VERSION = Version.V_7_16_0;
 
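The SingleNodeShutdownMetadata hunk shows the main payoff of moving to an interface: the class no longer spends its single superclass slot on the diff machinery and can drop the now-redundant Diffable clause. A hypothetical sketch (not from this patch) of what this frees up:

    // Hypothetical example: because SimpleDiffable is an interface, a diffable
    // class can now also extend an unrelated base class, which was impossible
    // while AbstractDiffable occupied the superclass slot.

    import org.elasticsearch.cluster.SimpleDiffable;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;

    abstract class AuditedRecord {
        // Unrelated base-class functionality the class could not inherit before.
        long lastModifiedMillis() {
            return System.currentTimeMillis();
        }
    }

    class NodeRecord extends AuditedRecord implements SimpleDiffable<NodeRecord> {
        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // no state in this sketch; real classes also need equals/hashCode
            // so that SimpleDiffable.diff() can detect "unchanged"
        }
    }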
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
index c2dca3274b465..84a5f4c8f6d5e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
@@ -8,7 +8,7 @@
 
 package org.elasticsearch.cluster.metadata;
 
-import org.elasticsearch.cluster.AbstractDiffable;
+import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -38,7 +38,7 @@
 * it is entirely independent from an index. It's a building block forming part of a regular index
 * template and a {@link ComponentTemplate}.
 */
-public class Template extends AbstractDiffable