diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index ae41ae00d018d..252cbda1392f8 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -14,7 +14,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 607c6de58b7f6..2a95177174e9b 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -26,7 +26,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/.github/workflows/create-documentation-issue.yml b/.github/workflows/create-documentation-issue.yml index 373eeb3569a51..df63847f8afca 100644 --- a/.github/workflows/create-documentation-issue.yml +++ b/.github/workflows/create-documentation-issue.yml @@ -14,7 +14,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index f03a3d125067d..2a5e539b214d3 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -11,7 +11,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index bda3bbbc1f125..31578b3fc4660 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -72,7 +72,7 @@ jobs: - name: Upload Coverage Report if: success() - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 with: files: ./codeCoverage.xml diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index c628f48e57eba..f4622859916c7 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -22,10 +22,6 @@ jobs: - name: Setup docker (missing on MacOS) if: runner.os == 'macos' run: | - # Workaround for https://github.com/actions/runner-images/issues/8104 - brew remove --ignore-dependencies qemu - curl -o ./qemu.rb https://raw.githubusercontent.com/Homebrew/homebrew-core/f88e30b3a23ef3735580f9b05535ce5a0a03c9e3/Formula/qemu.rb - brew install ./qemu.rb brew install docker colima start sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock diff --git a/.github/workflows/stalled.yml b/.github/workflows/stalled.yml new file mode 100644 index 0000000000000..19ec9c9438bbe --- /dev/null +++ b/.github/workflows/stalled.yml @@ -0,0 +1,28 @@ +name: Label Stalled PRs +on: + schedule: + - cron: '15 15 * * *' # Run every day at 15:15 UTC / 7:15 PST / 8:15 PDT +permissions: + pull-requests: write +jobs: + stale: + if: github.repository == 'opensearch-project/OpenSearch' + runs-on: ubuntu-latest + steps: + - name: GitHub App token + id: github_app_token + uses: tibdex/github-app-token@v2.1.0 + with: + app_id: ${{ secrets.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + installation_id: 22958780 + - name: Stale 
PRs + uses: actions/stale@v8 + with: + repo-token: ${{ steps.github_app_token.outputs.token }} + stale-pr-label: 'stalled' + stale-pr-message: 'This PR is stalled because it has been open for 30 days with no activity.' + days-before-pr-stale: 30 + days-before-issue-stale: -1 + days-before-pr-close: -1 + days-before-issue-close: -1 diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 2fe4fb1ac9d48..df785bcc70014 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -13,7 +13,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/CHANGELOG.md b/CHANGELOG.md index c1afccabe6c8e..759188fa4e398 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) +- Implement the Visitor design pattern in QueryBuilder to enable traversal of complex QueryBuilder trees ([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) +- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -41,6 +43,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) +- Bump jetty to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) +- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) @@ -77,8 +81,13 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) - Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) - Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) +- [Telemetry-Otel] Add support for the OtlpGrpcSpanExporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) +- Async blob read support for encrypted containers 
([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) +- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) +- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) ### Dependencies - Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) - Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) - Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) - Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) +- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) +- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) +- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.4 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.0 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208)) +- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219)) ### Changed - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) - Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) +- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) - Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) +- [Tracing Framework] Add support for SpanKind. 
([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) ### Deprecated @@ -101,7 +120,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) -- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/9725)) +- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) +- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) +- Fix remove ingest processor not handling the ignore_missing parameter correctly ([#10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) - Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) ### Security diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 3dc689795151b..d7bdd09ea882e 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -128,11 +128,15 @@ dependencies { testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" testFixturesApi gradleApi() testFixturesApi gradleTestKit() - testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.35.0' + testImplementation 'org.wiremock:wiremock-standalone:3.1.0' testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}" integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') { exclude module: "groovy" } + implementation('org.ajoberstar.grgit:grgit-core:5.2.0') { + exclude group: 'org.eclipse.jgit', module: 'org.eclipse.jgit' + } + implementation 'org.eclipse.jgit:org.eclipse.jgit:6.7.0.202309050840-r' } configurations.all { diff --git a/buildSrc/version.properties b/buildSrc/version.properties index be09584b1dba1..71b3e267700b1 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.8.0-snapshot-4373c3b +lucene = 9.8.0-snapshot-95cdd2e bundled_jdk_vendor = adoptium bundled_jdk = 20.0.2+9 @@ -48,7 +48,7 @@ reactivestreams = 1.0.4 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli -bouncycastle=1.75 +bouncycastle=1.76 # test dependencies randomizedrunner = 2.7.1 junit = 4.13.2 diff --git a/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index dc363f2776429..0000000000000 --- a/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30c3afcf058532d3d2b8820375043000e7f34a9b \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..70baf1270cd5d --- /dev/null +++ b/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +d2f7fbc5b2c49ca777a169d579f41082a9a57cc7 \ No newline at end of file diff --git 
a/libs/core/src/main/java/org/opensearch/core/index/Index.java b/libs/core/src/main/java/org/opensearch/core/index/Index.java index 77cc628213df9..b5ee482bcd952 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/Index.java +++ b/libs/core/src/main/java/org/opensearch/core/index/Index.java @@ -59,6 +59,7 @@ public class Index implements Writeable, ToXContentObject { public static final Index[] EMPTY_ARRAY = new Index[0]; private static final String INDEX_UUID_KEY = "index_uuid"; private static final String INDEX_NAME_KEY = "index_name"; + public static final String UNKNOWN_INDEX_NAME = "_unknown_"; private static final ObjectParser INDEX_PARSER = new ObjectParser<>("index", Builder::new); static { diff --git a/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java b/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java index 313bc23bedc90..8441ce8b1b622 100644 --- a/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java +++ b/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java @@ -527,6 +527,15 @@ public int getStatus() { return status; } + /** + * Get category class of a rest status code. + * + * @return Integer representing class category of the concrete rest status code + */ + public int getStatusFamilyCode() { + return status / 100; + } + public static RestStatus readFrom(StreamInput in) throws IOException { return RestStatus.valueOf(in.readString()); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java index 5b7795a5647f5..d3c28b3a9cb5e 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java @@ -9,7 +9,6 @@ package org.opensearch.telemetry.tracing; import org.opensearch.common.annotation.InternalApi; -import org.opensearch.telemetry.tracing.attributes.Attributes; import java.io.Closeable; import java.io.IOException; @@ -44,27 +43,13 @@ public DefaultTracer(TracingTelemetry tracingTelemetry, TracerContextStorage> headers) { Optional propagatedSpan = tracingTelemetry.getContextPropagator().extractFromHeaders(headers); - return startSpan( - spanCreationContext.getSpanName(), - propagatedSpan.map(SpanContext::new).orElse(null), - spanCreationContext.getAttributes() - ); + return startSpan(spanCreationContext.parent(propagatedSpan.map(SpanContext::new).orElse(null))); } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java index 10cb665e83b01..cbbcfe7a85d57 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java @@ -18,17 +18,74 @@ */ @ExperimentalApi public final class SpanCreationContext { - private final String spanName; - private final Attributes attributes; + private String spanName; + private Attributes attributes; + private SpanKind spanKind = SpanKind.INTERNAL; + private SpanContext parent; /** * Constructor. + */ + private SpanCreationContext() {} + + /** + * Sets the span type to server. 
+ * @return spanCreationContext + */ + public static SpanCreationContext server() { + SpanCreationContext spanCreationContext = new SpanCreationContext(); + spanCreationContext.spanKind = SpanKind.SERVER; + return spanCreationContext; + } + + /** + * Sets the span type to client. + * @return spanCreationContext + */ + public static SpanCreationContext client() { + SpanCreationContext spanCreationContext = new SpanCreationContext(); + spanCreationContext.spanKind = SpanKind.CLIENT; + return spanCreationContext; + } + + /** + * Sets the span type to internal. + * @return spanCreationContext + */ + public static SpanCreationContext internal() { + SpanCreationContext spanCreationContext = new SpanCreationContext(); + spanCreationContext.spanKind = SpanKind.INTERNAL; + return spanCreationContext; + } + + /** + * Sets the span name. * @param spanName span name. - * @param attributes attributes. + * @return spanCreationContext */ - public SpanCreationContext(String spanName, Attributes attributes) { + public SpanCreationContext name(String spanName) { this.spanName = spanName; + return this; + } + + /** + * Sets the span attributes. + * @param attributes attributes. + * @return spanCreationContext + */ + public SpanCreationContext attributes(Attributes attributes) { this.attributes = attributes; + return this; + } + + /** + * Sets the parent span. + * @param parent parent + * @return spanCreationContext + */ + public SpanCreationContext parent(SpanContext parent) { + this.parent = parent; + return this; } /** @@ -46,4 +103,20 @@ public String getSpanName() { public Attributes getAttributes() { return attributes; } + + /** + * Returns the span kind. + * @return spanKind. + */ + public SpanKind getSpanKind() { + return spanKind; + } + + /** + * Returns the parent span. + * @return parent. + */ + public SpanContext getParent() { + return parent; + } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanKind.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanKind.java new file mode 100644 index 0000000000000..d674bb2c866f2 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanKind.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.PublicApi; + +/** + * Type of Span. + */ +@PublicApi(since = "2.11.0") +public enum SpanKind { + /** + * Span represents the client side code. + */ + CLIENT, + /** + * Span represents the server side code. + */ + SERVER, + + /** + * Span represents internal calls. This is the default span kind. 
+ */ + INTERNAL; +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java index 19ffc68a62df0..49295f417df9e 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java @@ -9,7 +9,6 @@ package org.opensearch.telemetry.tracing; import org.opensearch.common.annotation.ExperimentalApi; -import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.telemetry.tracing.http.HttpTracer; import java.io.Closeable; @@ -32,34 +31,6 @@ public interface Tracer extends HttpTracer, Closeable { */ Span startSpan(SpanCreationContext context); - /** - * Starts the {@link Span} with given name - * - * @param spanName span name - * @return span, must be closed. - */ - Span startSpan(String spanName); - - /** - * Starts the {@link Span} with given name and attributes. This is required in cases when some attribute based - * decision needs to be made before starting the span. Very useful in the case of Sampling. - * - * @param spanName span name. - * @param attributes attributes to be added. - * @return span, must be closed. - */ - Span startSpan(String spanName, Attributes attributes); - - /** - * Starts the {@link Span} with the given name, parent and attributes. - * - * @param spanName span name. - * @param parentSpan parent span. - * @param attributes attributes to be added. - * @return span, must be closed. - */ - Span startSpan(String spanName, SpanContext parentSpan, Attributes attributes); - /** * Returns the current span. * @return current wrapped span. @@ -74,15 +45,6 @@ public interface Tracer extends HttpTracer, Closeable { */ ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext); - /** - * Start the span and scoped it. This must be used for scenarios where {@link SpanScope} and {@link Span} lifecycles - * are same and ends within the same thread where created. - * @param spanCreationContext span creation context - * @param parentSpan parent span. - * @return scope of the span, must be closed with explicit close or with try-with-resource - */ - ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext, SpanContext parentSpan); - /** * Creates the Span Scope for a current thread. It's mandatory to scope the span just after creation so that it will * automatically manage the attach /detach to the current thread. diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java index e1811e85f8890..38c9104c3ec35 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java @@ -9,7 +9,6 @@ package org.opensearch.telemetry.tracing; import org.opensearch.common.annotation.ExperimentalApi; -import org.opensearch.telemetry.tracing.attributes.Attributes; import java.io.Closeable; @@ -24,12 +23,11 @@ public interface TracingTelemetry extends Closeable { /** * Creates span with provided arguments * - * @param spanName name of the span - * @param parentSpan span's parent span - * @param attributes attributes to be added. + * @param spanCreationContext span creation context. + * @param parentSpan parent span. 
* @return span instance */ - Span createSpan(String spanName, Span parentSpan, Attributes attributes); + Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan); /** * provides tracing context propagator diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java index d7206a3c6b094..c073e8d3e766f 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java @@ -15,7 +15,6 @@ import org.opensearch.telemetry.tracing.SpanCreationContext; import org.opensearch.telemetry.tracing.SpanScope; import org.opensearch.telemetry.tracing.Tracer; -import org.opensearch.telemetry.tracing.attributes.Attributes; import java.util.List; import java.util.Map; @@ -40,21 +39,6 @@ public Span startSpan(SpanCreationContext context) { return NoopSpan.INSTANCE; } - @Override - public Span startSpan(String spanName) { - return NoopSpan.INSTANCE; - } - - @Override - public Span startSpan(String spanName, Attributes attributes) { - return NoopSpan.INSTANCE; - } - - @Override - public Span startSpan(String spanName, SpanContext parentSpan, Attributes attributes) { - return NoopSpan.INSTANCE; - } - @Override public SpanContext getCurrentSpan() { return new SpanContext(NoopSpan.INSTANCE); @@ -65,11 +49,6 @@ public ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext) { return ScopedSpan.NO_OP; } - @Override - public ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext, SpanContext parentSpan) { - return ScopedSpan.NO_OP; - } - @Override public SpanScope withSpanInScope(Span span) { return SpanScope.NO_OP; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java index 4672574e9f4ca..8a61dd70d6d54 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java @@ -9,40 +9,32 @@ package org.opensearch.telemetry.tracing.runnable; import org.opensearch.telemetry.tracing.ScopedSpan; -import org.opensearch.telemetry.tracing.SpanContext; import org.opensearch.telemetry.tracing.SpanCreationContext; import org.opensearch.telemetry.tracing.Tracer; -import org.opensearch.telemetry.tracing.attributes.Attributes; /** * Wraps the runnable and add instrumentation to trace the {@link Runnable} */ public class TraceableRunnable implements Runnable { private final Runnable runnable; - private final SpanContext parent; + private final SpanCreationContext spanCreationContext; private final Tracer tracer; - private final String spanName; - private final Attributes attributes; /** * Constructor. * @param tracer tracer - * @param spanName spanName - * @param parent parent Span. - * @param attributes attributes. + * @param spanCreationContext spanCreationContext * @param runnable runnable. 
*/ - public TraceableRunnable(Tracer tracer, String spanName, SpanContext parent, Attributes attributes, Runnable runnable) { + public TraceableRunnable(Tracer tracer, SpanCreationContext spanCreationContext, Runnable runnable) { this.tracer = tracer; - this.spanName = spanName; - this.parent = parent; - this.attributes = attributes; + this.spanCreationContext = spanCreationContext; this.runnable = runnable; } @Override public void run() { - try (ScopedSpan spanScope = tracer.startScopedSpan(new SpanCreationContext(spanName, attributes), parent)) { + try (ScopedSpan spanScope = tracer.startScopedSpan(spanCreationContext)) { runnable.run(); } } diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java index 48b72e1f673fe..0a717e993cb81 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java @@ -22,7 +22,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -39,6 +38,7 @@ public class DefaultTracerTests extends OpenSearchTestCase { private SpanScope mockSpanScope; private ThreadPool threadPool; private ExecutorService executorService; + private SpanCreationContext spanCreationContext; @Override public void setUp() throws Exception { @@ -58,27 +58,30 @@ public void tearDown() throws Exception { public void testCreateSpan() { DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - defaultTracer.startSpan("span_name"); + defaultTracer.startSpan(spanCreationContext); - assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + String spanName = defaultTracer.getCurrentSpan().getSpan().getSpanName(); + assertEquals("span_name", spanName); } @SuppressWarnings("unchecked") public void testCreateSpanWithAttributesWithMock() { DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); Attributes attributes = Attributes.create().addAttribute("name", "value"); - when(mockTracingTelemetry.createSpan(eq("span_name"), eq(mockParentSpan), eq(attributes))).thenReturn(mockSpan); - defaultTracer.startSpan("span_name", attributes); - verify(mockTracingTelemetry).createSpan(eq("span_name"), eq(mockParentSpan), eq(attributes)); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", attributes, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + defaultTracer.startSpan(spanCreationContext); + verify(mockTracingTelemetry).createSpan(eq(spanCreationContext), eq(mockParentSpan)); } @SuppressWarnings("unchecked") public void testCreateSpanWithAttributesWithParentMock() { DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); Attributes attributes = Attributes.create().addAttribute("name", "value"); - when(mockTracingTelemetry.createSpan(eq("span_name"), eq(mockParentSpan), eq(attributes))).thenReturn(mockSpan); - defaultTracer.startSpan("span_name", new SpanContext(mockParentSpan), attributes); - verify(mockTracingTelemetry).createSpan(eq("span_name"), eq(mockParentSpan), eq(attributes)); + 
SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", attributes, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + defaultTracer.startSpan(spanCreationContext); + verify(mockTracingTelemetry).createSpan(eq(spanCreationContext), eq(mockParentSpan)); verify(mockTracerContextStorage, never()).get(TracerContextStorage.CURRENT_SPAN); } @@ -90,11 +93,14 @@ public void testCreateSpanWithAttributes() { new ThreadContextBasedTracerContextStorage(threadContext, tracingTelemetry) ); - Span span = defaultTracer.startSpan( + SpanCreationContext spanCreationContext = buildSpanCreationContext( "span_name", - Attributes.create().addAttribute("key1", 1.0).addAttribute("key2", 2l).addAttribute("key3", true).addAttribute("key4", "key4") + Attributes.create().addAttribute("key1", 1.0).addAttribute("key2", 2l).addAttribute("key3", true).addAttribute("key4", "key4"), + null ); + Span span = defaultTracer.startSpan(spanCreationContext); + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); assertEquals(1.0, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key1")); assertEquals(2l, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key2")); @@ -110,11 +116,15 @@ public void testCreateSpanWithParent() { new ThreadContextBasedTracerContextStorage(new ThreadContext(Settings.EMPTY), tracingTelemetry) ); - Span span = defaultTracer.startSpan("span_name", null); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", null, null); + + Span span = defaultTracer.startSpan(spanCreationContext, null); SpanContext parentSpan = defaultTracer.getCurrentSpan(); - Span span1 = defaultTracer.startSpan("span_name_1", parentSpan, Attributes.EMPTY); + SpanCreationContext spanCreationContext1 = buildSpanCreationContext("span_name_1", Attributes.EMPTY, parentSpan.getSpan()); + + Span span1 = defaultTracer.startSpan(spanCreationContext1); assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); assertEquals(parentSpan.getSpan(), defaultTracer.getCurrentSpan().getSpan().getParentSpan()); @@ -126,9 +136,10 @@ public void testCreateSpanWithParent() { public void testCreateSpanWithContext() { DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); Attributes attributes = Attributes.create().addAttribute("name", "value"); - when(mockTracingTelemetry.createSpan(eq("span_name"), eq(mockParentSpan), eq(attributes))).thenReturn(mockSpan); - defaultTracer.startSpan(new SpanCreationContext("span_name", attributes)); - verify(mockTracingTelemetry).createSpan(eq("span_name"), eq(mockParentSpan), eq(attributes)); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", attributes, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + defaultTracer.startSpan(spanCreationContext); + verify(mockTracingTelemetry).createSpan(eq(spanCreationContext), eq(mockParentSpan)); } public void testCreateSpanWithNullParent() { @@ -139,7 +150,9 @@ public void testCreateSpanWithNullParent() { new ThreadContextBasedTracerContextStorage(threadContext, tracingTelemetry) ); - Span span = defaultTracer.startSpan("span_name", (SpanContext) null, Attributes.EMPTY); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + + Span span = 
defaultTracer.startSpan(spanCreationContext); assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); assertEquals(null, defaultTracer.getCurrentSpan().getSpan().getParentSpan()); @@ -154,7 +167,10 @@ public void testEndSpanByClosingScopedSpan() { tracingTelemetry ); DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); - ScopedSpan scopedSpan = defaultTracer.startScopedSpan(new SpanCreationContext("span_name", Attributes.EMPTY)); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + + ScopedSpan scopedSpan = defaultTracer.startScopedSpan(spanCreationContext); + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); assertEquals(((DefaultScopedSpan) scopedSpan).getSpanScope(), DefaultSpanScope.getCurrentSpanScope()); scopedSpan.close(); @@ -172,8 +188,11 @@ public void testEndSpanByClosingScopedSpanMultiple() { tracingTelemetry ); DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); - ScopedSpan scopedSpan = defaultTracer.startScopedSpan(new SpanCreationContext("span_name", Attributes.EMPTY)); - ScopedSpan scopedSpan1 = defaultTracer.startScopedSpan(new SpanCreationContext("span_name_1", Attributes.EMPTY)); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + SpanCreationContext spanCreationContext1 = buildSpanCreationContext("span_name_1", Attributes.EMPTY, null); + + ScopedSpan scopedSpan = defaultTracer.startScopedSpan(spanCreationContext); + ScopedSpan scopedSpan1 = defaultTracer.startScopedSpan(spanCreationContext1); assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); assertEquals(((DefaultScopedSpan) scopedSpan1).getSpanScope(), DefaultSpanScope.getCurrentSpanScope()); @@ -198,7 +217,8 @@ public void testEndSpanByClosingSpanScope() { tracingTelemetry ); DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); - Span span = defaultTracer.startSpan(new SpanCreationContext("span_name", Attributes.EMPTY)); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + Span span = defaultTracer.startSpan(spanCreationContext); SpanScope spanScope = defaultTracer.withSpanInScope(span); assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); assertEquals(spanScope, DefaultSpanScope.getCurrentSpanScope()); @@ -218,8 +238,8 @@ public void testEndSpanByClosingSpanScopeMultiple() { tracingTelemetry ); DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); - Span span = defaultTracer.startSpan(new SpanCreationContext("span_name", Attributes.EMPTY)); - Span span1 = defaultTracer.startSpan(new SpanCreationContext("span_name_1", Attributes.EMPTY)); + Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name", Attributes.EMPTY, null)); + Span span1 = defaultTracer.startSpan(buildSpanCreationContext("span_name_1", Attributes.EMPTY, null)); SpanScope spanScope = defaultTracer.withSpanInScope(span); SpanScope spanScope1 = defaultTracer.withSpanInScope(span1); assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); @@ -261,11 +281,11 @@ public void testSpanAcrossThreads() { CompletableFuture asyncTask = CompletableFuture.runAsync(() -> { // create a span - Span span = defaultTracer.startSpan(new SpanCreationContext("span_name_t_1", Attributes.EMPTY)); + Span span = 
defaultTracer.startSpan(buildSpanCreationContext("span_name_t_1", Attributes.EMPTY, null)); SpanScope spanScope = defaultTracer.withSpanInScope(span); CompletableFuture asyncTask1 = CompletableFuture.runAsync(() -> { - Span spanT2 = defaultTracer.startSpan(new SpanCreationContext("span_name_t_2", Attributes.EMPTY)); + Span spanT2 = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_2", Attributes.EMPTY, null)); SpanScope spanScopeT2 = defaultTracer.withSpanInScope(spanT2); assertEquals(spanT2, defaultTracer.getCurrentSpan().getSpan()); @@ -289,7 +309,7 @@ public void testSpanCloseOnThread2() { tracingTelemetry ); DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); - final Span span = defaultTracer.startSpan(new SpanCreationContext("span_name_t1", Attributes.EMPTY)); + final Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name_t1", Attributes.EMPTY, null)); try (SpanScope spanScope = defaultTracer.withSpanInScope(span)) { CompletableFuture asyncTask = CompletableFuture.runAsync(() -> async(new ActionListener() { @Override @@ -337,16 +357,16 @@ public void testSpanAcrossThreadsMultipleSpans() { CompletableFuture asyncTask = CompletableFuture.runAsync(() -> { // create a parent span - Span parentSpan = defaultTracer.startSpan(new SpanCreationContext("p_span_name", Attributes.EMPTY)); + Span parentSpan = defaultTracer.startSpan(buildSpanCreationContext("p_span_name", Attributes.EMPTY, null)); SpanScope parentSpanScope = defaultTracer.withSpanInScope(parentSpan); // create a span - Span span = defaultTracer.startSpan(new SpanCreationContext("span_name_t_1", Attributes.EMPTY)); + Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_1", Attributes.EMPTY, null)); SpanScope spanScope = defaultTracer.withSpanInScope(span); CompletableFuture asyncTask1 = CompletableFuture.runAsync(() -> { - Span spanT2 = defaultTracer.startSpan(new SpanCreationContext("span_name_t_2", Attributes.EMPTY)); + Span spanT2 = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_2", Attributes.EMPTY, null)); SpanScope spanScopeT2 = defaultTracer.withSpanInScope(spanT2); - Span spanT21 = defaultTracer.startSpan(new SpanCreationContext("span_name_t_2", Attributes.EMPTY)); + Span spanT21 = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_2", Attributes.EMPTY, null)); SpanScope spanScopeT21 = defaultTracer.withSpanInScope(spanT21); assertEquals(spanT21, defaultTracer.getCurrentSpan().getSpan()); spanScopeT21.close(); @@ -390,7 +410,16 @@ private void setupMocks() { when(mockSpan.getParentSpan()).thenReturn(mockParentSpan); when(mockParentSpan.getSpanId()).thenReturn("parent_span_id"); when(mockParentSpan.getTraceId()).thenReturn("trace_id"); - when(mockTracerContextStorage.get(TracerContextStorage.CURRENT_SPAN)).thenReturn(mockParentSpan, mockSpan); - when(mockTracingTelemetry.createSpan(eq("span_name"), eq(mockParentSpan), any(Attributes.class))).thenReturn(mockSpan); + spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, mockParentSpan); + when(mockTracerContextStorage.get(TracerContextStorage.CURRENT_SPAN)).thenReturn(mockSpan, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + } + + private SpanCreationContext buildSpanCreationContext(String spanName, Attributes attributes, Span parentSpan) { + SpanCreationContext spanCreationContext = SpanCreationContext.internal().name(spanName).attributes(attributes); + if (parentSpan 
!= null) { + spanCreationContext.parent(new SpanContext(parentSpan)); + } + return spanCreationContext; } } diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java index a67d9b22ca738..4c4f762653d57 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java @@ -34,9 +34,7 @@ public void testRunnableWithNullParent() throws Exception { final AtomicReference attributeValue = new AtomicReference<>(); TraceableRunnable traceableRunnable = new TraceableRunnable( defaultTracer, - spanName, - null, - Attributes.create().addAttribute("name", "value"), + SpanCreationContext.internal().name(spanName).attributes(Attributes.create().addAttribute("name", "value")), () -> { spanNameCaptured.set(defaultTracer.getCurrentSpan().getSpan().getSpanName()); attributeValue.set((String) ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("name")); @@ -55,14 +53,23 @@ public void testRunnableWithParent() throws Exception { String spanName = "testRunnable"; String parentSpanName = "parentSpan"; DefaultTracer defaultTracer = new DefaultTracer(new MockTracingTelemetry(), contextStorage); - ScopedSpan scopedSpan = defaultTracer.startScopedSpan(new SpanCreationContext(parentSpanName, Attributes.EMPTY)); + ScopedSpan scopedSpan = defaultTracer.startScopedSpan( + SpanCreationContext.internal().name(parentSpanName).attributes(Attributes.EMPTY) + ); SpanContext parentSpanContext = defaultTracer.getCurrentSpan(); AtomicReference currentSpan = new AtomicReference<>(); final AtomicBoolean isRunnableCompleted = new AtomicBoolean(false); - TraceableRunnable traceableRunnable = new TraceableRunnable(defaultTracer, spanName, parentSpanContext, Attributes.EMPTY, () -> { - isRunnableCompleted.set(true); - currentSpan.set(defaultTracer.getCurrentSpan()); - }); + TraceableRunnable traceableRunnable = new TraceableRunnable( + defaultTracer, + SpanCreationContext.internal() + .name(spanName) + .attributes(Attributes.create().addAttribute("name", "value")) + .parent(parentSpanContext), + () -> { + isRunnableCompleted.set(true); + currentSpan.set(defaultTracer.getCurrentSpan()); + } + ); traceableRunnable.run(); assertTrue(isRunnableCompleted.get()); assertEquals(spanName, currentSpan.get().getSpan().getSpanName()); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java index bd241de749f11..d6d9f8975f2fc 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java @@ -54,7 +54,7 @@ public class MappingCharFilterFactory extends AbstractCharFilterFactory implemen MappingCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name); - List> rules = Analysis.parseWordList(env, settings, "mappings", this::parse, false); + List> rules = Analysis.parseWordList(env, settings, "mappings", this::parse); if (rules == null) { throw new IllegalArgumentException("mapping requires either `mappings` or `mappings_path` to be configured"); } diff --git 
a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java index 94c7d63f2bee7..f37d5862b9d3f 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java @@ -211,8 +211,8 @@ private void createTokenFilterFactoryWithTypeTable(String[] rules) throws IOExce } public void testTypeTableParsingError() { - String[] rules = { "# This is a comment", "$ => DIGIT", "\\u200D => ALPHANUM", "abc => ALPHA" }; + String[] rules = { "# This is a comment", "# => ALPHANUM", "$ => DIGIT", "\\u200D => ALPHANUM", "abc => ALPHA" }; RuntimeException ex = expectThrows(RuntimeException.class, () -> createTokenFilterFactoryWithTypeTable(rules)); - assertEquals("Line [4]: Invalid mapping rule: [abc => ALPHA]. Only a single character is allowed.", ex.getMessage()); + assertEquals("Line [5]: Invalid mapping rule: [abc => ALPHA]. Only a single character is allowed.", ex.getMessage()); } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java index 387eb4a377007..28e041ac8c92d 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java @@ -37,6 +37,7 @@ public static CharFilterFactory create(String... 
rules) throws IOException { public void testRulesOk() throws IOException { MappingCharFilterFactory mappingCharFilterFactory = (MappingCharFilterFactory) create( + "# This is a comment", "# => _hashtag_", ":) => _happy_", ":( => _sad_" @@ -64,7 +65,10 @@ public void testRuleError() { } public void testRulePartError() { - RuntimeException ex = expectThrows(RuntimeException.class, () -> create("# => _hashtag_", ":) => _happy_", "a:b")); - assertEquals("Line [3]: Invalid mapping rule : [a:b]", ex.getMessage()); + RuntimeException ex = expectThrows( + RuntimeException.class, + () -> create("# This is a comment", "# => _hashtag_", ":) => _happy_", "a:b") + ); + assertEquals("Line [4]: Invalid mapping rule : [a:b]", ex.getMessage()); } } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index e92cc0c4838c7..802c79c780689 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -127,6 +127,69 @@ - match: { tokens.2.token: brown } - match: { tokens.3.token: fox } + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter + split_on_numerics: false + type_table: + - "\\u0023 => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - "@ => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + --- "word_delimiter_graph": - do: @@ -231,6 +294,69 @@ - match: { detail.tokenfilters.0.tokens.5.end_offset: 19 } - match: { detail.tokenfilters.0.tokens.5.position: 5 } + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter_graph + split_on_numerics: false + type_table: + - "\\u0023 => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 
6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter_graph + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter_graph + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - "@ => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + --- "unique": - do: diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml index 0078575ae8e57..5e266c10cba8f 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml @@ -69,6 +69,7 @@ char_filter: - type: mapping mappings: + - "# This is a comment" - "# => _hashsign_" - "@ => _atsign_" - length: { tokens: 3 } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java index 5da3b6bea7bc2..93a35eef4d396 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.core.common.Strings; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; @@ -66,16 +67,20 @@ public List getFields() { @Override public IngestDocument execute(IngestDocument document) { - if (ignoreMissing) { - fields.forEach(field -> { - String path = document.renderTemplate(field); - if (document.hasField(path)) { - document.removeField(path); + fields.forEach(field -> { + String path = document.renderTemplate(field); + final boolean fieldPathIsNullOrEmpty = Strings.isNullOrEmpty(path); + if (fieldPathIsNullOrEmpty || document.hasField(path) == false) { + if (ignoreMissing) { + return; + } else if (fieldPathIsNullOrEmpty) { + throw new IllegalArgumentException("field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("field [" + path + "] doesn't exist"); } - }); - } else { - fields.forEach(document::removeField); - } + } + document.removeField(path); + }); return document; } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java index 
cf65236157111..8f729c6a39bbd 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java @@ -42,7 +42,6 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class RemoveProcessorTests extends OpenSearchTestCase { @@ -67,12 +66,44 @@ public void testRemoveNonExistingField() throws Exception { config.put("field", fieldName); String processorTag = randomAlphaOfLength(10); Processor processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, config); - try { - processor.execute(ingestDocument); - fail("remove field should have failed"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("not present as part of path [" + fieldName + "]")); - } + assertThrows( + "field [" + fieldName + "] doesn't exist", + IllegalArgumentException.class, + () -> { processor.execute(ingestDocument); } + ); + + Map configWithEmptyField = new HashMap<>(); + configWithEmptyField.put("field", ""); + processorTag = randomAlphaOfLength(10); + Processor removeProcessorWithEmptyField = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + configWithEmptyField + ); + assertThrows( + "field path cannot be null nor empty", + IllegalArgumentException.class, + () -> removeProcessorWithEmptyField.execute(ingestDocument) + ); + } + + public void testRemoveEmptyField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + Map config = new HashMap<>(); + config.put("field", ""); + String processorTag = randomAlphaOfLength(10); + Processor removeProcessorWithEmptyField = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + assertThrows( + "field path cannot be null nor empty", + IllegalArgumentException.class, + () -> removeProcessorWithEmptyField.execute(ingestDocument) + ); } public void testIgnoreMissing() throws Exception { @@ -84,5 +115,13 @@ public void testIgnoreMissing() throws Exception { String processorTag = randomAlphaOfLength(10); Processor processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, config); processor.execute(ingestDocument); + + // when using template snippet, the resolved field path maybe empty + Map configWithEmptyField = new HashMap<>(); + configWithEmptyField.put("field", ""); + configWithEmptyField.put("ignore_missing", true); + processorTag = randomAlphaOfLength(10); + processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, configWithEmptyField); + processor.execute(ingestDocument); } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml new file mode 100644 index 0000000000000..ff5a17136afa2 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml @@ -0,0 +1,93 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test remove processor with non-existing field and without ignore_missing": + - do: + ingest.put_pipeline: + id: "my_pipeline" + 
body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{unknown}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + +--- +"Test remove processor with resolved field path doesn't exist": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{foo}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field \[bar\] doesn\'t exist/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + message: "foo bar baz", + foo: "bar" + } + +--- +"Test remove processor with non-existing field and ignore_missing": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{unknown}}", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + + - do: + get: + index: test + id: 1 + - match: { _source.message: "foo bar baz" } diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 6eaa40708e4ae..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f8a34fc3d450343ab05ccb5af318a836a6a5fb3 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..c1daa91dd5433 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +57e2b0cca55da8ad856dfd60be42e6daabbc98c3 \ No newline at end of file diff --git a/modules/search-pipeline-common/README.md b/modules/search-pipeline-common/README.md new file mode 100644 index 0000000000000..70615d846987b --- /dev/null +++ b/modules/search-pipeline-common/README.md @@ -0,0 +1,202 @@ +- [Search Pipelines](#search-pipelines) + - [Architecture](#architecture) + - [Search Processors](#search-processors) + - [Creating a Search Processor](#creating-a-search-processor) + - [Creating a Pipeline](#creating-a-search-pipeline) + +# Search Pipelines + +This README briefly covers the two types of search processors, explains how you can use them to create search pipelines, and walks through the creation of a new processor. + +## Architecture + +Search pipelines allow cluster operators to create and reuse [components](#search-processors) to transform search queries and results. + +With search pipelines, the operator can combine multiple [search processors](#search-processors) to create a transform which acts on the search request and/or search response. + +Search pipelines offer numerous benefits: + +1. search processors living in OpenSearch can be used by _all_ calling applications; +2. search pipeline operations occur inside the OpenSearch cluster, so large results can be processed before returning to the calling application\*; +3. search processors can be distributed in plugins to be shared with other OpenSearch users; +4. 
search pipelines only need to be modified once (and without changing or redeploying any calling applications) to have a change occur on all incoming queries\*\*;
+5. search pipelines support standard APIs for accessing metrics and disaster recovery.
+
+*Within a cluster, results are passed using a more efficient, but version-specific binary protocol. You can pass result information back to a coordinator, allow it to post-process (e.g. rerank or collapse), and finally truncate it before sending it to the client over the less efficient but flexible JSON API.
+
+**For example, the `FilterQueryRequestProcessor` could be used to exclude search results immediately, without needing to make a code change in the application layer and deploy the change across your fleet.
+
+## Search Processors
+
+You can create many search pipelines by combining search processors in various orders. There are two types of search processors:
+
+1. search request processors, which transform a request _before_ it is executed;
+2. search response processors, which transform the output of a request _after_ it is executed.
+
+You can find all existing search processors registered in `SearchPipelineCommonModulePlugin.java` and described on the documentation website.
+
+### Creating a search processor
+
+New search processors can be created in two different ways.
+
+Generally, a search processor can be created in your own `SearchPipelinePlugin`. This method is best when you are creating a unique search processor for your niche application. This method should also be used when your processor relies on an outside service. To get started creating a search processor in a `SearchPipelinePlugin`, you can use the [plugin template](https://github.com/opensearch-project/opensearch-plugin-template-java).
+
+Alternatively, if you think your processor may be valuable to _all_ OpenSearch users, you can follow these steps:
+
+1. Create a new class in `org.opensearch.search.pipeline.common`. This class will hold your new processor, and its name should indicate whether it is a request or response processor. For example, a response processor which deletes a target field could be called `DeleteFieldResponseProcessor`.
+
+2. Make the class extend the generic `AbstractProcessor` class and implement either the `SearchRequestProcessor` or `SearchResponseProcessor` interface, depending on what type of processor it is. In the `DeleteFieldResponseProcessor` example, this would look like:
+
+```public class DeleteFieldResponseProcessor extends AbstractProcessor implements SearchResponseProcessor```
+
+3. Create the main functionality of your processor and implement the methods required by the implemented interface. This will be `SearchRequest processRequest(SearchRequest request) throws Exception;` for a search request processor or `SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception;` for a search response processor.
+
+For the example `DeleteFieldResponseProcessor`, this will look like:
+
+```
+@Override
+public SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception {
+
+    boolean foundField = false;
+    SearchHit[] hits = response.getHits().getHits();
+    for (SearchHit hit : hits) {
+
+        // Process each hit as desired
+
+        if (hit.hasSource()) {
+            // Change hit source if needed
+            BytesReference sourceRef = hit.getSourceRef();
+            Tuple<? extends MediaType, Map<String, Object>> typeAndSourceMap = XContentHelper.convertToMap(
+                sourceRef,
+                false,
+                (MediaType) null
+            );
+
+            Map<String, Object> sourceAsMap = typeAndSourceMap.v2();
+            if (sourceAsMap.containsKey(field)) {
+                // Handle source as map
+                foundField = true;
+            }
+        }
+    }
+
+    if (!foundField && !ignoreMissing) {
+        // Handle error scenarios
+    }
+
+    return response;
+}
+```
+
+4. Create a factory to parse processor-specific JSON configurations. These are used for constructing a processor instance.
+
+In the `DeleteFieldResponseProcessor` example, this would look something like:
+
+```
+public static final class Factory implements Processor.Factory<SearchResponseProcessor> {
+
+    /**
+     * Constructor for factory
+     */
+    Factory() {}
+
+    @Override
+    public DeleteFieldResponseProcessor create(
+        Map<String, Processor.Factory<SearchResponseProcessor>> processorFactories,
+        String tag,
+        String description,
+        boolean ignoreFailure,
+        Map<String, Object> config,
+        PipelineContext pipelineContext
+    ) throws Exception {
+        String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field");
+        boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, tag, config, "ignore_missing", false);
+        return new DeleteFieldResponseProcessor(tag, description, ignoreFailure, field, ignoreMissing);
+    }
+}
+```
+
+In this example, we provide specific configurations for which field should be deleted and whether the processor should ignore attempts to remove a non-existent field.
+
+5. Add the new search processor to the `SearchPipelineCommonModulePlugin` getter for the corresponding processor type.
+
+For the `DeleteFieldResponseProcessor`, you would modify the response processor getter to have:
+
+```
+@Override
+public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProcessors(Parameters parameters) {
+    return Map.of(
+        RenameFieldResponseProcessor.TYPE,
+        new RenameFieldResponseProcessor.Factory(),
+        DeleteFieldResponseProcessor.TYPE,
+        new DeleteFieldResponseProcessor.Factory()
+    );
+}
+```
+
+6. After creating a search processor, the processor is ready to be tested in a search pipeline.
+
+To test your new search processor, you can make use of the test suite [`SearchPipelineCommonYamlTestSuiteIT`](src/yamlRestTest/java/org/opensearch/search/pipeline/common).
+
+Following the format of the YAML files in [`rest-api-spec.test.search_pipeline`](src/yamlRestTest/resources/rest-api-spec/test/search_pipeline), you should be able to create your own YAML test file to exercise your new processor; a sketch of such a test follows below.
+
+To run the tests, from the root of the OpenSearch repository, you can run `./gradlew :modules:search-pipeline-common:yamlRestTest`.
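+
+As a rough sketch only: the test below exercises the hypothetical `DeleteFieldResponseProcessor` built in the steps above, registered as `delete_field`. The pipeline id, index name, and document are illustrative, and it assumes the `search_pipeline.put` spec action and the `search_pipeline` search parameter are available to the suite:
+
+```
+---
+"Test delete_field response processor":
+  # hypothetical: assumes the DeleteFieldResponseProcessor from the steps above
+  # is registered in this module under the type name "delete_field"
+  - do:
+      search_pipeline.put:
+        id: "my_delete_pipeline"
+        body: >
+          {
+            "response_processors": [
+              {
+                "delete_field": {
+                  "field": "message"
+                }
+              }
+            ]
+          }
+  - match: { acknowledged: true }
+
+  - do:
+      index:
+        index: test
+        id: 1
+        body: { "message": "secret", "other": "keep me" }
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        index: test
+        search_pipeline: "my_delete_pipeline"
+  - match: { hits.total.value: 1 }
+  - is_false: hits.hits.0._source.message
+```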
+
+7. Finally, the processor is ready to be used in a cluster.
+
+To use the new processor, make sure the cluster is reloaded and that the new processor is accessible.
+
+The new processor should show up when calling `GET /_nodes/search_pipelines`.
+
+If the new processor is shown in the response, it should be available for use in a search pipeline.
+
+## Creating a Search Pipeline
+
+To create a search pipeline, you must create an ordered list of search processors in the OpenSearch cluster.
+
+An example creation request is:
+
+```
+PUT /_search/pipeline/my_pipeline
+{
+  "request_processors": [
+    {
+      "filter_query" : {
+        "tag" : "tag1",
+        "description" : "This processor restricts results to publicly visible documents",
+        "query" : {
+          "term": {
+            "visibility": "public"
+          }
+        }
+      }
+    }
+  ],
+  "response_processors": [
+    {
+      "rename_field": {
+        "field": "message",
+        "target_field": "notification"
+      }
+    }
+  ]
+}
+```
+
+Alternatively, if you want to use just the `DeleteFieldResponseProcessor` created before, you would use:
+
+```
+PUT /_search/pipeline/my_pipeline2
+{
+  "response_processors": [
+    {
+      "delete_field": {
+        "field": "message"
+      }
+    }
+  ]
+}
+```
+
+## Running a search request using a search pipeline
+
+To run a search request using a search pipeline, you first need to create the pipeline using the request format shown above.
+
+After that is completed, you can run a request using the format: `POST /myindex/_search?search_pipeline=<pipeline_name>`.
+
+In the example of the `DeleteFieldResponseProcessor`, this would be called with `POST /myindex/_search?search_pipeline=my_pipeline2`.
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 8a3332c950b6d..0000000000000
--- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fde64e3b23bc9a0849b9897febfe9f13c5113143
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..035b47c5f388c
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+0deb3b85eadf831be17b48acab0785fd9d34fc44
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 33c2afacf2395..0000000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b01a791705fa01fce48dd02ea79fa8045de8dd5e
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..6ff5a433f0a4e
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+9a204267d68ce4ba36bfddc366cd6865cf5e1378
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 1e7986dafa11e..0000000000000
--- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-43d19320b1b9cd18638b1602fa87d5f21ee043bc
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..a65ab33a31e2a
--- /dev/null
+++
b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +71e8e811f873ba2b47c7ecf9d890cbeac5b6be41 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 14880d9c2d243..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9244dc232f175010b480d4d88e13945c17a0b28b \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..04ab7b7e7adb8 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +6e1274273895365bd83391cc4b79f5264479f5de \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index edc4de3fffe28..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3101a4f79820c1ca3dfb8f49b74c5fb5b32940e1 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..cef3f97d03c51 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +e634c8685edad2bdb5c13748b18c0c1a46bb63a3 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 54c310277b09b..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f12b2a22cd5ebcd84f40a40e78fdd4e268b3b26d \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..3e2dd19a9dd85 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +0afdf2afacbae39414ed06325fbb4bed17c07a7d \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 358db9ea3f0f5..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7dbf5cc3dff93cc1ffe45d79b129859590d001dd \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 
index 0000000000000..8c0544acd1ca0 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +166e2ea297182f7bf7070af02aacea9e6a3a19c8 \ No newline at end of file diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java index a1918f3c954d0..18c9dd222e2cf 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java @@ -8,6 +8,7 @@ package org.opensearch.plugin.correlation.core.index.mapper; +import org.apache.lucene.codecs.KnnVectorsFormat; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.KnnFloatVectorField; import org.apache.lucene.document.StoredField; @@ -23,8 +24,6 @@ import java.util.Locale; import java.util.Optional; -import static org.apache.lucene.index.FloatVectorValues.MAX_DIMENSIONS; - /** * Field mapper for the correlation vector type * @@ -32,7 +31,7 @@ */ public class CorrelationVectorFieldMapper extends VectorFieldMapper { - private static final int LUCENE_MAX_DIMENSION = MAX_DIMENSIONS; + private static final int LUCENE_MAX_DIMENSION = KnnVectorsFormat.DEFAULT_MAX_DIMENSIONS; private final FieldType vectorFieldType; diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java index 70311a9279bbd..674f35069a742 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java @@ -191,7 +191,7 @@ public void testTypeParser_parse_fromCorrelationParamsContext_InvalidVectorSimil /** * test parseCreateField in CorrelationVectorFieldMapper - * @throws IOException + * @throws IOException ioexception */ public void testCorrelationVectorFieldMapper_parseCreateField() throws IOException { String fieldName = "test-field-name"; diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java index b3fcbedd74558..134c6519d7220 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java @@ -203,7 +203,7 @@ public void testDoToQueryInvalidFieldType() { /** * test serialization of Correlation Query Builder - * @throws Exception + * @throws Exception Exception */ public void testSerialization() throws Exception { assertSerialization(Optional.empty()); diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1 
b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1
deleted file mode 100644
index 9911bb75f9209..0000000000000
--- a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-df22e1b6a9f6b218913f5b68dd16641344397fe0
\ No newline at end of file
diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1
new file mode 100644
index 0000000000000..2e96c404bef98
--- /dev/null
+++ b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1
@@ -0,0 +1 @@
+0cb53f10290a634808555bc4b34328fdab1001f2
\ No newline at end of file
diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java
index 60239d609a08a..ddfb99e626718 100644
--- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java
+++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java
@@ -64,9 +64,9 @@ public Optional<AuthToken> translateAuthToken(org.opensearch.identity.
 public AuthToken issueOnBehalfOfToken(Subject subject, OnBehalfOfClaims claims) {
 String password = generatePassword();
- final byte[] rawEncoded = Base64.getEncoder().encode((claims.getAudience() + ":" + password).getBytes(UTF_8)); // Make a new
- // ShiroSubject w/
- // audience as name
+ // Make a new ShiroSubject w/ audience as name
+ final byte[] rawEncoded = Base64.getUrlEncoder().encode((claims.getAudience() + ":" + password).getBytes(UTF_8));
+
 final String usernamePassword = new String(rawEncoded, UTF_8);
 final String header = "Basic " + usernamePassword;
 BasicAuthToken token = new BasicAuthToken(header);
@@ -75,6 +75,19 @@ public AuthToken issueOnBehalfOfToken(Subject subject, OnBehalfOfClaims claims)
 return token;
 }
+ @Override
+ public AuthToken issueServiceAccountToken(String audience) {
+
+ String password = generatePassword();
+ final byte[] rawEncoded = Base64.getUrlEncoder().withoutPadding().encode((audience + ":" + password).getBytes(UTF_8));
+ final String usernamePassword = new String(rawEncoded, UTF_8);
+ final String header = "Basic " + usernamePassword;
+
+ BasicAuthToken token = new BasicAuthToken(header);
+ shiroTokenPasswordMap.put(token, password);
+ return token;
+ }
+
 @Override
 public Subject authenticateToken(AuthToken authToken) {
 return new NoopSubject();
diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java
index 7c7414843d86f..db77ced298991 100644
--- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java
+++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java
@@ -167,6 +167,18 @@ public void testTokenNoopIssuance() {
 Subject subject = new NoopSubject();
 AuthToken token = tokenManager.issueOnBehalfOfToken(subject, claims);
 assertTrue(token instanceof AuthToken);
+ AuthToken serviceAccountToken = tokenManager.issueServiceAccountToken("test");
+ assertTrue(serviceAccountToken instanceof AuthToken);
+ assertEquals(serviceAccountToken.asAuthHeaderValue(), "noopToken");
 }
+ public void testShouldSucceedIssueServiceAccountToken() {
+ String audience = "testExtensionName";
+ BasicAuthToken authToken = (BasicAuthToken) shiroAuthTokenHandler.issueServiceAccountToken(audience);
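+ // editorial note: the assertions below verify that the service-account token
+ // round-trips, i.e. translating the Basic auth header back yields the same
+ // generated password that was cached in the token-password map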
assertTrue(authToken instanceof BasicAuthToken); + UsernamePasswordToken translatedToken = (UsernamePasswordToken) shiroAuthTokenHandler.translateAuthToken(authToken).get(); + assertEquals(authToken.getPassword(), new String(translatedToken.getPassword())); + assertTrue(shiroAuthTokenHandler.getShiroTokenPasswordMap().containsKey(authToken)); + assertEquals(shiroAuthTokenHandler.getShiroTokenPasswordMap().get(authToken), new String(translatedToken.getPassword())); + } } diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index f54c80e9f76c1..330a17c02bc7a 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -93,7 +93,7 @@ dependencies { // Microsoft Word files with visio diagrams api 'org.apache.commons:commons-math3:3.6.1' // POIs dependency - api 'com.zaxxer:SparseBitSet:1.2' + api 'com.zaxxer:SparseBitSet:1.3' } restResources { diff --git a/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 b/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 deleted file mode 100644 index 5f1d015b87ac7..0000000000000 --- a/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8467c813d442837fcaeddbc42cf5c5359fab4933 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 b/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 new file mode 100644 index 0000000000000..2803db7c91e30 --- /dev/null +++ b/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 @@ -0,0 +1 @@ +533eac055afe3d5f614ea95e333afd6c2bde8f26 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index e6840a9b02b38..0000000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b316bcd094e3917b1ece93a6edbab93f8315fb3b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 new file mode 100644 index 0000000000000..46010d64015ad --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 @@ -0,0 +1 @@ +23d8bcad6b57912e4633ca9955926ffcdf3c5c71 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index 9181b1c3ab1b6..0000000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f16e5252ad7a46d5eaf255231b0a5da307599082 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 new file mode 100644 index 0000000000000..a843d972ac681 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 @@ -0,0 +1 @@ +3ee440dfa1c557c1cc0c46b5dadf5ef3896ccebb \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index 9911bb75f9209..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df22e1b6a9f6b218913f5b68dd16641344397fe0 \ No newline at end of file diff --git 
a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 new file mode 100644 index 0000000000000..2e96c404bef98 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 @@ -0,0 +1 @@ +0cb53f10290a634808555bc4b34328fdab1001f2 \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 1f73d6a4d7520..ec040a180876e 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -60,7 +60,7 @@ dependencies { api 'com.google.api-client:google-api-client:2.2.0' - api 'com.google.api.grpc:proto-google-common-protos:2.10.0' + api 'com.google.api.grpc:proto-google-common-protos:2.25.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" @@ -78,7 +78,7 @@ dependencies { api 'com.google.http-client:google-http-client:1.43.2' api 'com.google.http-client:google-http-client-appengine:1.43.2' api 'com.google.http-client:google-http-client-gson:1.43.3' - api 'com.google.http-client:google-http-client-jackson2:1.43.2' + api 'com.google.http-client:google-http-client-jackson2:1.43.3' api 'com.google.oauth-client:google-oauth-client:1.34.1' diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1 deleted file mode 100644 index 7b606a07651ed..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e52a9967ebd8246fc4cca64df5f03608db5ac6e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.3.jar.sha1 new file mode 100644 index 0000000000000..8380b9fb770b5 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.3.jar.sha1 @@ -0,0 +1 @@ +689da86469d19a01c726c8c24477b95c8a834bbe \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 deleted file mode 100644 index bf97707836c70..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cf5ac081c05682b0eba6659dee55352fde5852e1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.0.jar.sha1 new file mode 100644 index 0000000000000..b5ef7ee78e794 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.0.jar.sha1 @@ -0,0 +1 @@ +adfa7c3d9b806969db75cf35fe4b286b3b8b1ce0 \ No newline at end of file diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 511bae590bf13..45c9f522c09d8 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -30,6 +30,13 @@ dependencies { api "io.opentelemetry:opentelemetry-exporter-logging:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-semconv:${versions.opentelemetry}-alpha" api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-exporter-otlp:${versions.opentelemetry}" + api 
"io.opentelemetry:opentelemetry-exporter-common:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-exporter-otlp-common:${versions.opentelemetry}" + runtimeOnly "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" + runtimeOnly "com.squareup.okhttp3:okhttp:4.11.0" + runtimeOnly "com.squareup.okio:okio-jvm:3.5.0" + runtimeOnly "io.opentelemetry:opentelemetry-exporter-sender-okhttp:${versions.opentelemetry}" testImplementation "io.opentelemetry:opentelemetry-sdk-testing:${versions.opentelemetry}" } @@ -44,6 +51,32 @@ thirdPartyAudit { ) ignoreMissingClasses( + 'android.net.http.X509TrustManagerExtensions', + 'android.net.ssl.SSLSockets', + 'android.os.Build$VERSION', + 'android.security.NetworkSecurityPolicy', + 'android.util.Log', + 'com.google.common.io.ByteStreams', + 'com.google.common.util.concurrent.ListenableFuture', + 'io.grpc.CallOptions', + 'io.grpc.Channel', + 'io.grpc.Drainable', + 'io.grpc.KnownLength', + 'io.grpc.ManagedChannel', + 'io.grpc.MethodDescriptor', + 'io.grpc.MethodDescriptor$Builder', + 'io.grpc.MethodDescriptor$Marshaller', + 'io.grpc.MethodDescriptor$MethodType', + 'io.grpc.stub.AbstractFutureStub', + 'io.grpc.stub.AbstractStub', + 'io.grpc.stub.ClientCalls', + 'org.bouncycastle.jsse.BCSSLParameters', + 'org.bouncycastle.jsse.BCSSLSocket', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.Conscrypt$Version', + 'org.conscrypt.ConscryptHostnameVerifier', + 'org.openjsse.javax.net.ssl.SSLParameters', + 'org.openjsse.javax.net.ssl.SSLSocket', 'io.opentelemetry.api.events.EventEmitter', 'io.opentelemetry.api.events.EventEmitterBuilder', 'io.opentelemetry.api.events.EventEmitterProvider', @@ -68,7 +101,8 @@ thirdPartyAudit { 'io.opentelemetry.extension.incubator.metrics.LongGauge', 'io.opentelemetry.extension.incubator.metrics.LongGaugeAdviceConfigurer', 'io.opentelemetry.extension.incubator.metrics.LongHistogramAdviceConfigurer', - 'io.opentelemetry.extension.incubator.metrics.LongUpDownCounterAdviceConfigurer' + 'io.opentelemetry.extension.incubator.metrics.LongUpDownCounterAdviceConfigurer', + 'kotlin.io.path.PathsKt' ) } diff --git a/plugins/telemetry-otel/licenses/kotlin-stdlib-1.7.10.jar.sha1 b/plugins/telemetry-otel/licenses/kotlin-stdlib-1.7.10.jar.sha1 new file mode 100644 index 0000000000000..4d119fbf4df70 --- /dev/null +++ b/plugins/telemetry-otel/licenses/kotlin-stdlib-1.7.10.jar.sha1 @@ -0,0 +1 @@ +d2abf9e77736acc4450dc4a3f707fa2c10f5099d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/kotlin-stdlib-LICENSE.txt b/plugins/telemetry-otel/licenses/kotlin-stdlib-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/kotlin-stdlib-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/protobuf-java-NOTICE.txt b/plugins/telemetry-otel/licenses/kotlin-stdlib-NOTICE.txt similarity index 100% rename from protobuf-java-NOTICE.txt rename to plugins/telemetry-otel/licenses/kotlin-stdlib-NOTICE.txt diff --git a/plugins/telemetry-otel/licenses/okhttp-4.11.0.jar.sha1 b/plugins/telemetry-otel/licenses/okhttp-4.11.0.jar.sha1 new file mode 100644 index 0000000000000..1fc0db6615cb5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/okhttp-4.11.0.jar.sha1 @@ -0,0 +1 @@ +436932d695b2c43f2c86b8111c596179cd133d56 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/okhttp-LICENSE.txt b/plugins/telemetry-otel/licenses/okhttp-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/okhttp-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/okhttp-NOTICE.txt b/plugins/telemetry-otel/licenses/okhttp-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/okio-jvm-3.5.0.jar.sha1 b/plugins/telemetry-otel/licenses/okio-jvm-3.5.0.jar.sha1 new file mode 100644 index 0000000000000..7b19d32d872fa --- /dev/null +++ b/plugins/telemetry-otel/licenses/okio-jvm-3.5.0.jar.sha1 @@ -0,0 +1 @@ +d6a0bc7343210eff7dd5cfdd6eb9b5f0036638ce \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/okio-jvm-LICENSE.txt b/plugins/telemetry-otel/licenses/okio-jvm-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/okio-jvm-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/okio-jvm-NOTICE.txt b/plugins/telemetry-otel/licenses/okio-jvm-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 new file mode 100644 index 0000000000000..eccb15f7b7c8e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.30.1.jar.sha1 @@ -0,0 +1 @@ +f299d336dba1039478497f37b273dfa764c6faef \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 new file mode 100644 index 0000000000000..e88b7514ee54d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.30.1.jar.sha1 @@ -0,0 +1 @@ +15692246539571c41180aff2b55abe527b939a7b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 new file mode 100644 index 0000000000000..86937743208c6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.30.1.jar.sha1 @@ -0,0 +1 @@ +947cf43a6411c4a323e14594431040a476ad43e8 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 new file mode 100644 index 0000000000000..068926277253c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.30.1.jar.sha1 @@ -0,0 +1 @@ +9f3a14515500e4df260ce7b10a668237a95ac791 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-LICENSE.txt new file mode 100644 index 0000000000000..6b0b1270ff0ca --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-LICENSE.txt @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java
index b395a335a4d83..fe05cc8bb7a41 100644
--- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java
@@ -14,6 +14,8 @@
 import org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler;
 import org.opensearch.telemetry.tracing.sampler.RequestSampler;

+import java.security.AccessController;
+import java.security.PrivilegedAction;
 import java.util.concurrent.TimeUnit;

 import io.opentelemetry.api.OpenTelemetry;
@@ -45,11 +47,13 @@ private OTelResourceProvider() {}
      * @return OpenTelemetry instance
      */
     public static OpenTelemetry get(TelemetrySettings telemetrySettings, Settings settings) {
-        return get(
-            settings,
-            OTelSpanExporterFactory.create(settings),
-            ContextPropagators.create(W3CTraceContextPropagator.getInstance()),
-            Sampler.parentBased(new RequestSampler(new ProbabilisticSampler(telemetrySettings)))
+        return AccessController.doPrivileged(
+            (PrivilegedAction<OpenTelemetry>) () -> get(
+                settings,
+                OTelSpanExporterFactory.create(settings),
+                ContextPropagators.create(W3CTraceContextPropagator.getInstance()),
+                Sampler.parentBased(new RequestSampler(new ProbabilisticSampler(telemetrySettings)))
+            )
         );
     }

diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpanKindConverter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpanKindConverter.java
new file mode 100644
index 0000000000000..4edb837082126
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpanKindConverter.java
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import io.opentelemetry.api.trace.SpanKind;
+
+/**
+ * Converts {@link org.opensearch.telemetry.tracing.SpanKind} to OTel {@link SpanKind}
+ */
+final class OTelSpanKindConverter {
+
+    /**
+     * Constructor.
+     */
+    private OTelSpanKindConverter() {}
+
+    /**
+     * SpanKind converter.
+     * @param spanKind span kind.
+     * @return otel span kind.
+ */ + static SpanKind convert(org.opensearch.telemetry.tracing.SpanKind spanKind) { + if (spanKind == null) { + return SpanKind.INTERNAL; + } else { + switch (spanKind) { + case CLIENT: + return SpanKind.CLIENT; + case SERVER: + return SpanKind.SERVER; + case INTERNAL: + default: + return SpanKind.INTERNAL; + } + } + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java index ee2f17aabb7f9..53066ad4ad444 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.telemetry.tracing.attributes.Attributes; import java.io.Closeable; import java.io.IOException; @@ -47,8 +46,8 @@ public void close() { } @Override - public Span createSpan(String spanName, Span parentSpan, Attributes attributes) { - return createOtelSpan(spanName, parentSpan, attributes); + public Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) { + return createOtelSpan(spanCreationContext, parentSpan); } @Override @@ -56,18 +55,29 @@ public TracingContextPropagator getContextPropagator() { return new OTelTracingContextPropagator(openTelemetry); } - private Span createOtelSpan(String spanName, Span parentSpan, Attributes attributes) { - io.opentelemetry.api.trace.Span otelSpan = otelSpan(spanName, parentSpan, OTelAttributesConverter.convert(attributes)); - Span newSpan = new OTelSpan(spanName, otelSpan, parentSpan); + private Span createOtelSpan(SpanCreationContext spanCreationContext, Span parentSpan) { + io.opentelemetry.api.trace.Span otelSpan = otelSpan( + spanCreationContext.getSpanName(), + parentSpan, + OTelAttributesConverter.convert(spanCreationContext.getAttributes()), + OTelSpanKindConverter.convert(spanCreationContext.getSpanKind()) + ); + Span newSpan = new OTelSpan(spanCreationContext.getSpanName(), otelSpan, parentSpan); return newSpan; } - io.opentelemetry.api.trace.Span otelSpan(String spanName, Span parentOTelSpan, io.opentelemetry.api.common.Attributes attributes) { + io.opentelemetry.api.trace.Span otelSpan( + String spanName, + Span parentOTelSpan, + io.opentelemetry.api.common.Attributes attributes, + io.opentelemetry.api.trace.SpanKind spanKind + ) { return parentOTelSpan == null || !(parentOTelSpan instanceof OTelSpan) ? 
             otelTracer.spanBuilder(spanName).setAllAttributes(attributes).startSpan() :
             otelTracer.spanBuilder(spanName)
                 .setParent(Context.current().with(((OTelSpan) parentOTelSpan).getDelegateSpan()))
                 .setAllAttributes(attributes)
+                .setSpanKind(spanKind)
                 .startSpan();
     }
 }
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java
index cd4ffb160903b..da7ce5c47d9ca 100644
--- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java
@@ -16,6 +16,7 @@
 
 import java.lang.invoke.MethodHandles;
 import java.lang.invoke.MethodType;
+import java.lang.reflect.Method;
 import java.security.AccessController;
 import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
@@ -55,9 +56,17 @@ private static SpanExporter instantiateSpanExporter(Class<SpanExporter> spanExpo
         // Check we ourselves are not being called by unprivileged code.
         SpecialPermission.check();
         return AccessController.doPrivileged((PrivilegedExceptionAction<SpanExporter>) () -> {
+            String methodName = "create";
+            String getDefaultMethod = "getDefault";
+            for (Method m : spanExporterProviderClass.getMethods()) {
+                if (m.getName().equals(getDefaultMethod)) {
+                    methodName = getDefaultMethod;
+                    break;
+                }
+            }
             try {
                 return (SpanExporter) MethodHandles.publicLookup()
-                    .findStatic(spanExporterProviderClass, "create", MethodType.methodType(spanExporterProviderClass))
+                    .findStatic(spanExporterProviderClass, methodName, MethodType.methodType(spanExporterProviderClass))
                     .asType(MethodType.methodType(SpanExporter.class))
                     .invokeExact();
             } catch (Throwable e) {
diff --git a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
index 4480cbb2bab4b..726db3d3f4700 100644
--- a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
+++ b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
@@ -9,6 +9,8 @@
 grant {
   permission java.lang.RuntimePermission "getClassLoader";
   permission java.lang.RuntimePermission "accessDeclaredMembers";
+  permission java.net.NetPermission "getProxySelector";
+  permission java.net.SocketPermission "*", "connect,resolve";
 };
 
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanKindConverterTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanKindConverterTests.java
new file mode 100644
index 0000000000000..d07e32d00a92a
--- /dev/null
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanKindConverterTests.java
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+import io.opentelemetry.api.trace.SpanKind;
+
+public class OTelSpanKindConverterTests extends OpenSearchTestCase {
+
+    public void testSpanKindNullConverterNull() {
+        assertEquals(SpanKind.INTERNAL, OTelSpanKindConverter.convert(null));
+    }
+
+    public void testSpanKindConverter() {
+        assertEquals(SpanKind.INTERNAL, OTelSpanKindConverter.convert(org.opensearch.telemetry.tracing.SpanKind.INTERNAL));
+        assertEquals(SpanKind.CLIENT, OTelSpanKindConverter.convert(org.opensearch.telemetry.tracing.SpanKind.CLIENT));
+        assertEquals(SpanKind.SERVER, OTelSpanKindConverter.convert(org.opensearch.telemetry.tracing.SpanKind.SERVER));
+    }
+
+}
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java
index 3f46cb621a8ec..505756318ff62 100644
--- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java
@@ -35,11 +35,11 @@ public void testCreateSpanWithoutParent() {
         when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder);
         when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder);
         when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class));
+        when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder);
         Map<String, String> attributeMap = Collections.singletonMap("name", "value");
         Attributes attributes = Attributes.create().addAttribute("name", "value");
         TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry);
-        Span span = tracingTelemetry.createSpan("span_name", null, attributes);
-
+        Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), null);
         verify(mockSpanBuilder, never()).setParent(any());
         verify(mockSpanBuilder).setAllAttributes(createAttribute(attributes));
         assertNull(span.getParentSpan());
@@ -54,12 +54,13 @@ public void testCreateSpanWithParent() {
         when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder);
         when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder);
         when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class));
+        when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder);
 
         Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null);
 
         TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry);
         Attributes attributes = Attributes.create().addAttribute("name", 1l);
-        Span span = tracingTelemetry.createSpan("span_name", parentSpan, attributes);
+        Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), parentSpan);
 
         verify(mockSpanBuilder).setParent(any());
         verify(mockSpanBuilder).setAllAttributes(createAttributeLong(attributes));
@@ -77,6 +78,7 @@ public void testCreateSpanWithParentWithMultipleAttributes() {
         when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder);
         when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder);
         when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class));
+        when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder);
 
         Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null);
 
@@ -86,7 +88,7 @@ public void testCreateSpanWithParentWithMultipleAttributes() {
             .addAttribute("key2", 2.0)
             .addAttribute("key3", true)
             .addAttribute("key4", "key4");
-        Span span = tracingTelemetry.createSpan("span_name", parentSpan, attributes);
+        Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), parentSpan);
 
         io.opentelemetry.api.common.Attributes otelAttributes = io.opentelemetry.api.common.Attributes.builder()
             .put("key1", 1l)
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporterWithGetDefault.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporterWithGetDefault.java
new file mode 100644
index 0000000000000..225cfa6ab2d1a
--- /dev/null
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporterWithGetDefault.java
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing.exporter;
+
+import java.util.Collection;
+
+import io.opentelemetry.sdk.common.CompletableResultCode;
+import io.opentelemetry.sdk.trace.data.SpanData;
+import io.opentelemetry.sdk.trace.export.SpanExporter;
+
+public class DummySpanExporterWithGetDefault implements SpanExporter {
+
+    public static DummySpanExporterWithGetDefault getDefault() {
+        return new DummySpanExporterWithGetDefault();
+    }
+
+    @Override
+    public CompletableResultCode export(Collection<SpanData> spans) {
+        return null;
+    }
+
+    @Override
+    public CompletableResultCode flush() {
+        return null;
+    }
+
+    @Override
+    public CompletableResultCode shutdown() {
+        return null;
+    }
+}
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java
index d9a86ccd3180b..d71aef9366e21 100644
--- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java
@@ -63,4 +63,15 @@ public void testSpanExporterNonSpanExporterClass() {
 
     }
 
+    public void testSpanExporterGetDefaultMethod() {
+        Settings settings = Settings.builder()
+            .put(
+                OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING.getKey(),
+                "org.opensearch.telemetry.tracing.exporter.DummySpanExporterWithGetDefault"
+            )
+            .build();
+
+        assertTrue(OTelSpanExporterFactory.create(settings) instanceof DummySpanExporterWithGetDefault);
+    }
+
 }
diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
index 418d6b0eda469..aef363058b394 100644
--- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
+++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
@@ -259,7 +259,7 @@ public void testIndexing() throws Exception {
      * This test verifies that during rolling upgrades the segment replication does not break when replica shards can
      * be running on older codec versions.
      *
-     * @throws Exception
+     * @throws Exception exception
      */
     @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7679")
     public void testIndexingWithSegRep() throws Exception {
diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml
index e6a2a3d52e116..c043015281a9a 100644
--- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml
+++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml
@@ -208,7 +208,7 @@
         id: 1
   - length: { _source: 2 }
   - match: { _source.do_nothing: "foo" }
-  - match: { _source.error: "processor first_processor [remove]: field [field_to_remove] not present as part of path [field_to_remove]" }
+  - match: { _source.error: "processor first_processor [remove]: field [field_to_remove] doesn't exist" }
 
 ---
 "Test rolling up json object arrays":
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
index 1f1f42890355e..3f79227ce64e8 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
@@ -138,6 +138,35 @@
   - is_false: nodes.$node_id.indices.translog
   - is_false: nodes.$node_id.indices.recovery
 
+---
+"Metric - indexing doc_status":
+  - skip:
+      version: " - 2.99.99"
+      reason: "To be introduced in future release :: TODO: change if/when we backport to 2.x"
+  - do:
+      nodes.info: {}
+  - set:
+      nodes._arbitrary_key_: node_id
+
+  - do:
+      nodes.stats: { metric: indices, index_metric: indexing }
+
+  - is_false: nodes.$node_id.indices.docs
+  - is_false: nodes.$node_id.indices.store
+  - is_true: nodes.$node_id.indices.indexing
+  - is_true: nodes.$node_id.indices.indexing.doc_status
+  - is_false: nodes.$node_id.indices.get
+  - is_false: nodes.$node_id.indices.search
+  - is_false: nodes.$node_id.indices.merges
+  - is_false: nodes.$node_id.indices.refresh
+  - is_false: nodes.$node_id.indices.flush
+  - is_false: nodes.$node_id.indices.warmer
+  - is_false: nodes.$node_id.indices.query_cache
+  - is_false: nodes.$node_id.indices.fielddata
+  - is_false: nodes.$node_id.indices.completion
+  - is_false: nodes.$node_id.indices.segments
+  - is_false: nodes.$node_id.indices.translog
+  - is_false: nodes.$node_id.indices.recovery
+
 ---
 "Metric - recovery":
diff --git a/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index e7c7dc2bbc046..0000000000000
--- a/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1446b7641743a1082b566179d1bf2960f5a0724b
\ No newline at end of file
diff --git a/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..4ac89f2e792d7
--- /dev/null
+++ b/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+8c82be3d997d781bb72d6d0eadade064dd2cd6db
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index d0f64519cd6ff..0000000000000
--- a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-127032ea137d2501b24f0e35e5f9a2e1c7864633
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..624b5174a444f
--- /dev/null
+++ b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+4c261d17c681c0d91171c67e192abfef59adea2e
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index dc363f2776429..0000000000000
--- a/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-30c3afcf058532d3d2b8820375043000e7f34a9b
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..70baf1270cd5d
--- /dev/null
+++ b/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+d2f7fbc5b2c49ca777a169d579f41082a9a57cc7
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 294beba43f62a..0000000000000
--- a/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e6f742efe0ef3b383468fe38f88ab2dd69ed3d2c
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..20ddb9ae3ef27
--- /dev/null
+++ b/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+8d1cf3d6db43fad6630376ba59451f848f4d387c
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index c2a2ef5b13946..0000000000000
--- a/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3162856444777130dee2c4cabe1bf6d18710ff63
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..c3ad03ca53b13
--- /dev/null
+++ b/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+83ab97638bb5269f950d75bba5675d3cfb63f2fa
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 7c6adaaba9cf1..0000000000000
--- a/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5fe8383516eca7300f978ce38042e327b0a57877
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..c2a4c5334b314
--- /dev/null
+++ b/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+97c26362151908dc892263edda3872abbacb71a8
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 586702c968a77..0000000000000
--- a/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b3e77970485be6d2dd59b999bbaa65a2cb993744
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..32534d07e47dc
--- /dev/null
+++ b/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+8337eddc0dddd0d7dd50c5aa0d17e5e31592f9fa
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 493598eefff5e..0000000000000
--- a/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-86d667ea2f7fb2142d2acacf801dcea47d014a5e
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..7db245cc521c7
--- /dev/null
+++ b/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+a2e3fae930295f0e2b401effe04eafc25692a414
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 1bf937f10d795..0000000000000
--- a/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-930d004de698f374da8ac5530fd80e241edeba45
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..d01a6d733196e
--- /dev/null
+++ b/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+e88d8a464e6cfa345b946c9c8822ba7ee2a9159f
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 74458bc93f90b..0000000000000
--- a/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f62882823d5aa9ed4cf0081a8c18f35e21992080
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..c7b9640bad170
--- /dev/null
+++ b/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+9905790675c01e8dc24f9a5e6b9b28b879c65a52
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 3231d0e067940..0000000000000
--- a/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f1ec1527e283b423b7ff5e12cd8d889e7247199d
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..c4cd9e47624f8
--- /dev/null
+++ b/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+d6c8be427ec8ffc7e8233ffbf0d190d95a56cf14
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index dd47faf91f206..0000000000000
--- a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-de787c052879893e47d21fa161c93413665d55d7
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..dfee145d3ea26
--- /dev/null
+++ b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+11716d61288feaa692593bf699affa8de2b564c4
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 2b378438bfb14..0000000000000
--- a/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7e541ed960a571f5d9a0ecff5c26fd5ca857581e
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..c7410086ba86c
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+3a888e06c0535403b9e58a8dcddeb5e6513a4930
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1
deleted file mode 100644
index 1e3ed6561e3ef..0000000000000
--- a/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4b222ef09a5f20896d031a8322f2e69304c16384
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1
new file mode 100644
index 0000000000000..6d8d4205f4d02
--- /dev/null
+++ b/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1
@@ -0,0 +1 @@
+52dfc8bf135ed29f5baf0a967c1bb63dedb9a069
\ No newline at end of file
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java
index ceacb028698de..c733329a1b5f7 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java
@@ -108,8 +108,9 @@ public void testConcurrentSearchTaskTracking() {
             assertEquals(mainTaskInfo.getTaskId(), taskInfo.getParentTaskId());
 
             Map> threadStats = getThreadStats(SearchAction.NAME + "[*]", taskInfo.getTaskId());
-            // Concurrent search forks each slice of 5 segments to different thread
-            assertEquals((int) Math.ceil(getSegmentCount(INDEX_NAME) / 5.0), threadStats.size());
+            // Concurrent search forks each slice of 5 segments to different thread (see please
+            // https://github.com/apache/lucene/issues/12498)
+            assertEquals((int) Math.ceil(getSegmentCount(INDEX_NAME) / 5.0) + 1, threadStats.size());
 
             // assert that all task descriptions have non-zero length
             MatcherAssert.assertThat(taskInfo.getDescription().length(), greaterThan(0));
diff --git a/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java
new file mode 100644
index 0000000000000..f270cb1399072
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java
@@ -0,0 +1,276 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.nodestats;
+
+import org.opensearch.ExceptionsHelper;
+import org.opensearch.action.DocWriteResponse;
+import org.opensearch.action.bulk.BulkItemResponse;
+import org.opensearch.action.bulk.BulkRequest;
+import org.opensearch.action.bulk.BulkResponse;
+import org.opensearch.action.delete.DeleteRequest;
+import org.opensearch.action.delete.DeleteResponse;
+import org.opensearch.action.index.IndexRequest;
+import org.opensearch.action.index.IndexResponse;
+import org.opensearch.action.update.UpdateRequest;
+import org.opensearch.action.update.UpdateResponse;
+import org.opensearch.index.IndexNotFoundException;
+import org.opensearch.index.engine.DocumentMissingException;
+import org.opensearch.index.engine.VersionConflictEngineException;
+import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
+import org.opensearch.test.OpenSearchIntegTestCase.Scope;
+import org.hamcrest.MatcherAssert;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.util.Collections.singletonMap;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+@ClusterScope(scope = Scope.TEST, numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false)
+public class NodeStatsIT extends OpenSearchIntegTestCase {
+
+    private final DocStatusStats expectedDocStatusStats = new DocStatusStats();
+    private static final String FIELD = "dummy_field";
+    private static final String VALUE = "dummy_value";
+    private static final Map SOURCE = singletonMap(FIELD, VALUE);
+
+    public void testNodeIndicesStatsDocStatusStatsIndexBulk() {
+        { // Testing Index
+            final String INDEX = "test_index";
+            final String ID = "id";
+            { // Testing Normal Index
+                IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet();
+                updateExpectedDocStatusCounter(response);
+
+                MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED));
+                assertDocStatusStats();
+            }
+            { // Testing Missing Alias
+                updateExpectedDocStatusCounter(
+                    expectThrows(
+                        IndexNotFoundException.class,
+                        () -> client().index(new IndexRequest(INDEX).id("missing_alias").setRequireAlias(true).source(SOURCE)).actionGet()
+                    )
+                );
+                assertDocStatusStats();
+            }
+            {
+                // Test Missing Pipeline: Ingestion failure, not Indexing failure
+                expectThrows(
+                    IllegalArgumentException.class,
+                    () -> client().index(new IndexRequest(INDEX).id("missing_pipeline").setPipeline("missing").source(SOURCE)).actionGet()
+                );
+                assertDocStatusStats();
+            }
+            { // Testing Version Conflict
+                final String docId = "version_conflict";
+
+                updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet());
+                updateExpectedDocStatusCounter(
+                    expectThrows(
+                        VersionConflictEngineException.class,
+                        () -> client().index(new IndexRequest(INDEX).id(docId).source(SOURCE).setIfSeqNo(1L).setIfPrimaryTerm(99L))
+                            .actionGet()
+                    )
+                );
+                assertDocStatusStats();
+            }
+        }
+        { // Testing Bulk
+            final String INDEX = "bulk_index";
+
+            int sizeOfIndexRequests = scaledRandomIntBetween(10, 20);
+            int sizeOfDeleteRequests = scaledRandomIntBetween(5, sizeOfIndexRequests);
+            int sizeOfNotFoundRequests = scaledRandomIntBetween(5, sizeOfIndexRequests);
+
+            BulkRequest bulkRequest = new BulkRequest();
+
+            for (int i = 0; i < sizeOfIndexRequests; ++i) {
+                bulkRequest.add(new IndexRequest(INDEX).id(String.valueOf(i)).source(SOURCE));
+            }
+
+            BulkResponse response = client().bulk(bulkRequest).actionGet();
+
+            MatcherAssert.assertThat(response.hasFailures(), equalTo(false));
+            MatcherAssert.assertThat(response.getItems().length, equalTo(sizeOfIndexRequests));
+
+            for (BulkItemResponse itemResponse : response.getItems()) {
+                updateExpectedDocStatusCounter(itemResponse.getResponse());
+            }
+
+            refresh(INDEX);
+            bulkRequest.requests().clear();
+
+            for (int i = 0; i < sizeOfDeleteRequests; ++i) {
+                bulkRequest.add(new DeleteRequest(INDEX, String.valueOf(i)));
+            }
+            for (int i = 0; i < sizeOfNotFoundRequests; ++i) {
+                bulkRequest.add(new DeleteRequest(INDEX, String.valueOf(25 + i)));
+            }
+
+            response = client().bulk(bulkRequest).actionGet();
+
+            MatcherAssert.assertThat(response.hasFailures(), equalTo(false));
+            MatcherAssert.assertThat(response.getItems().length, equalTo(sizeOfDeleteRequests + sizeOfNotFoundRequests));
+
+            for (BulkItemResponse itemResponse : response.getItems()) {
+                updateExpectedDocStatusCounter(itemResponse.getResponse());
+            }
+
+            refresh(INDEX);
+            assertDocStatusStats();
+        }
+    }
+
+    public void testNodeIndicesStatsDocStatusStatsCreateDeleteUpdate() {
+        { // Testing Create
+            final String INDEX = "create_index";
+            final String ID = "id";
+            { // Testing Creation
+                IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE).create(true)).actionGet();
+                updateExpectedDocStatusCounter(response);
+
+                MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED));
+                assertDocStatusStats();
+            }
+            { // Testing Version Conflict
+                final String docId = "version_conflict";
+
+                updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet());
+                updateExpectedDocStatusCounter(
+                    expectThrows(
+                        VersionConflictEngineException.class,
+                        () -> client().index(new IndexRequest(INDEX).id(docId).source(SOURCE).create(true)).actionGet()
+                    )
+                );
+                assertDocStatusStats();
+            }
+        }
+        { // Testing Delete
+            final String INDEX = "delete_index";
+            final String ID = "id";
+            { // Testing Deletion
+                IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet();
+                updateExpectedDocStatusCounter(response);
+
+                DeleteResponse deleteResponse = client().delete(new DeleteRequest(INDEX, ID)).actionGet();
+                updateExpectedDocStatusCounter(deleteResponse);
+
+                MatcherAssert.assertThat(response.getSeqNo(), greaterThanOrEqualTo(0L));
+                MatcherAssert.assertThat(deleteResponse.getResult(), equalTo(DocWriteResponse.Result.DELETED));
+                assertDocStatusStats();
+            }
+            { // Testing Non-Existing Doc
+                updateExpectedDocStatusCounter(client().delete(new DeleteRequest(INDEX, "does_not_exist")).actionGet());
+                assertDocStatusStats();
+            }
+            { // Testing Version Conflict
+                final String docId = "version_conflict";
+
+                updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet());
+                updateExpectedDocStatusCounter(
+                    expectThrows(
+                        VersionConflictEngineException.class,
+                        () -> client().delete(new DeleteRequest(INDEX, docId).setIfSeqNo(2L).setIfPrimaryTerm(99L)).actionGet()
+                    )
+                );
+
+                assertDocStatusStats();
+            }
+        }
+        { // Testing Update
+            final String INDEX = "update_index";
+            final String ID = "id";
+            { // Testing Not Found
+                updateExpectedDocStatusCounter(
+                    expectThrows(
+                        DocumentMissingException.class,
+                        () -> client().update(new UpdateRequest(INDEX, ID).doc(SOURCE)).actionGet()
+                    )
+                );
+                assertDocStatusStats();
+            }
+            { // Testing NoOp Update
+                updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet());
+
+                UpdateResponse response = client().update(new UpdateRequest(INDEX, ID).doc(SOURCE)).actionGet();
+                updateExpectedDocStatusCounter(response);
+
+                MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.NOOP));
+                assertDocStatusStats();
+            }
+            { // Testing Update
+                final String UPDATED_VALUE = "updated_value";
+                UpdateResponse response = client().update(new UpdateRequest(INDEX, ID).doc(singletonMap(FIELD, UPDATED_VALUE))).actionGet();
+                updateExpectedDocStatusCounter(response);
+
+                MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.UPDATED));
+                assertDocStatusStats();
+            }
+            { // Testing Missing Alias
+                updateExpectedDocStatusCounter(
+                    expectThrows(
+                        IndexNotFoundException.class,
+                        () -> client().update(new UpdateRequest(INDEX, ID).setRequireAlias(true).doc(new IndexRequest().source(SOURCE)))
+                            .actionGet()
+                    )
+                );
+                assertDocStatusStats();
+            }
+            { // Testing Version Conflict
+                final String docId = "version_conflict";
+
+                updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet());
+                updateExpectedDocStatusCounter(
+                    expectThrows(
+                        VersionConflictEngineException.class,
+                        () -> client().update(new UpdateRequest(INDEX, docId).doc(SOURCE).setIfSeqNo(2L).setIfPrimaryTerm(99L)).actionGet()
+                    )
+                );
+                assertDocStatusStats();
+            }
+        }
+    }
+
+    private void assertDocStatusStats() {
+        DocStatusStats docStatusStats = client().admin()
+            .cluster()
+            .prepareNodesStats()
+            .execute()
+            .actionGet()
+            .getNodes()
+            .get(0)
+            .getIndices()
+            .getIndexing()
+            .getTotal()
+            .getDocStatusStats();
+
+        assertTrue(
+            Arrays.equals(
+                docStatusStats.getDocStatusCounter(),
+                expectedDocStatusStats.getDocStatusCounter(),
+                Comparator.comparingLong(AtomicLong::longValue)
+            )
+        );
+    }
+
+    private void updateExpectedDocStatusCounter(DocWriteResponse r) {
+        expectedDocStatusStats.inc(r.status());
+    }
+
+    private void updateExpectedDocStatusCounter(Exception e) {
+        expectedDocStatusStats.inc(ExceptionsHelper.status(e));
+    }
+
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
index 621fb262c0a91..157f8e41fee24 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
@@ -27,8 +27,6 @@
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.indices.replication.common.ReplicationType;
-import org.opensearch.node.Node;
-import org.opensearch.node.remotestore.RemoteStoreNodeAttribute;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
 import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.test.OpenSearchIntegTestCase;
@@ -168,6 +166,29 @@ public static Settings remoteStoreClusterSettings(String name, Path path) {
         return remoteStoreClusterSettings(name, path, name, path);
     }
 
+    public static Settings remoteStoreClusterSettings(
+        String segmentRepoName,
+        Path segmentRepoPath,
+        String segmentRepoType,
+        String translogRepoName,
+        Path translogRepoPath,
+        String translogRepoType
+    ) {
+        Settings.Builder settingsBuilder = Settings.builder();
+        settingsBuilder.put(
+            buildRemoteStoreNodeAttributes(
+                segmentRepoName,
+                segmentRepoPath,
+                segmentRepoType,
+                translogRepoName,
+                translogRepoPath,
+                translogRepoType,
+                false
+            )
+        );
+        return settingsBuilder.build();
+    }
+
     public static Settings remoteStoreClusterSettings(
         String segmentRepoName,
         Path segmentRepoPath,
@@ -185,6 +206,26 @@ public static Settings buildRemoteStoreNodeAttributes(
         String translogRepoName,
         Path translogRepoPath,
         boolean withRateLimiterAttributes
+    ) {
+        return buildRemoteStoreNodeAttributes(
+            segmentRepoName,
+            segmentRepoPath,
+            FsRepository.TYPE,
+            translogRepoName,
+            translogRepoPath,
+            FsRepository.TYPE,
+            withRateLimiterAttributes
+        );
+    }
+
+    public static Settings buildRemoteStoreNodeAttributes(
+        String segmentRepoName,
+        Path segmentRepoPath,
+        String segmentRepoType,
+        String translogRepoName,
+        Path translogRepoPath,
+        String translogRepoType,
+        boolean withRateLimiterAttributes
     ) {
         String segmentRepoTypeAttributeKey = String.format(
             Locale.getDefault(),
@@ -219,13 +260,13 @@ public static Settings buildRemoteStoreNodeAttributes(
 
         Settings.Builder settings = Settings.builder()
             .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName)
-            .put(segmentRepoTypeAttributeKey, FsRepository.TYPE)
+            .put(segmentRepoTypeAttributeKey, segmentRepoType)
            .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath)
             .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName)
-            .put(translogRepoTypeAttributeKey, FsRepository.TYPE)
+            .put(translogRepoTypeAttributeKey, translogRepoType)
             .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath)
             .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName)
-            .put(stateRepoTypeAttributeKey, FsRepository.TYPE)
+            .put(stateRepoTypeAttributeKey, segmentRepoType)
             .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath);
 
         if (withRateLimiterAttributes) {
@@ -310,46 +351,6 @@ public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) {
         }
     }
 
-    public Settings buildClusterSettingsWith() {
-        String segmentRepoTypeAttributeKey = String.format(
-            Locale.getDefault(),
-            "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
-            REPOSITORY_NAME
-        );
-        String segmentRepoSettingsAttributeKeyPrefix = String.format(
-            Locale.getDefault(),
-            "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
-            REPOSITORY_NAME
-        );
-        String translogRepoTypeAttributeKey = String.format(
-            Locale.getDefault(),
-            "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
-            REPOSITORY_2_NAME
-        );
-        String translogRepoSettingsAttributeKeyPrefix = String.format(
-            Locale.getDefault(),
-            "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
-            REPOSITORY_2_NAME
-        );
-        return Settings.builder()
-            .put(
-                Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY,
-                REPOSITORY_NAME
-            )
-            .put(segmentRepoTypeAttributeKey, FsRepository.TYPE)
-            .put(segmentRepoSettingsAttributeKeyPrefix + "location", randomRepoPath())
-            .put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean())
-            .put(segmentRepoSettingsAttributeKeyPrefix + "max_remote_download_bytes_per_sec", "2kb")
-            .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES)
-            .put(
-                Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY,
-                REPOSITORY_2_NAME
-            )
-            .put(translogRepoTypeAttributeKey, FsRepository.TYPE)
-            .put(translogRepoSettingsAttributeKeyPrefix + "location", randomRepoPath())
-            .build();
-    }
-
     public static int getFileCount(Path path) throws Exception {
         final AtomicInteger filesExisting = new AtomicInteger(0);
         Files.walkFileTree(path, new SimpleFileVisitor<>() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
index 3ccf563941f9c..1fb5c2052aded 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
@@ -8,6 +8,7 @@
 
 package org.opensearch.remotestore;
 
+import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
 import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
 import org.opensearch.action.admin.indices.get.GetIndexRequest;
@@ -24,7 +25,9 @@
 import org.opensearch.index.IndexService;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.translog.Translog.Durability;
 import org.opensearch.indices.IndicesService;
+import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.recovery.RecoveryState;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.test.OpenSearchIntegTestCase;
@@ -119,6 +122,16 @@ public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogFlush() throws Excep
         testPeerRecovery(randomIntBetween(2, 5), true);
     }
 
+    public void testPeerRecoveryWithLowActivityTimeout() throws Exception {
+        ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings(
+            Settings.builder()
+                .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20kb")
+                .put(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), "1s")
+        );
+        internalCluster().client().admin().cluster().updateSettings(req).get();
+        testPeerRecovery(randomIntBetween(2, 5), true);
+    }
+
     public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataRefresh() throws Exception {
         testPeerRecovery(1, false);
     }
@@ -323,6 +336,86 @@ public void testClusterBufferIntervalValidation() {
         );
     }
 
+    public void testRequestDurabilityWhenRestrictSettingExplicitFalse() throws ExecutionException, InterruptedException {
+        // Explicit node settings and request durability
+        testRestrictSettingFalse(true, Durability.REQUEST);
+    }
+
+    public void testAsyncDurabilityWhenRestrictSettingExplicitFalse() throws ExecutionException, InterruptedException {
+        // Explicit node settings and async durability
+        testRestrictSettingFalse(true, Durability.ASYNC);
+    }
+
+    public void testRequestDurabilityWhenRestrictSettingImplicitFalse() throws ExecutionException, InterruptedException {
+        // No node settings and request durability
+        testRestrictSettingFalse(false, Durability.REQUEST);
+    }
+
+    public void testAsyncDurabilityWhenRestrictSettingImplicitFalse() throws ExecutionException, InterruptedException {
+        // No node settings and async durability
+        testRestrictSettingFalse(false, Durability.ASYNC);
+    }
+
+    private void testRestrictSettingFalse(boolean setRestrictFalse, Durability durability) throws ExecutionException, InterruptedException {
+        String clusterManagerName;
+        if (setRestrictFalse) {
+            clusterManagerName = internalCluster().startClusterManagerOnlyNode(
+                Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), false).build()
+            );
+        } else {
+            clusterManagerName = internalCluster().startClusterManagerOnlyNode();
+        }
+        String dataNode = internalCluster().startDataOnlyNodes(1).get(0);
+        Settings indexSettings = Settings.builder()
+            .put(indexSettings())
+            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability)
+            .build();
+        createIndex(INDEX_NAME, indexSettings);
+        IndexShard indexShard = getIndexShard(dataNode);
+        assertEquals(durability, indexShard.indexSettings().getTranslogDurability());
+
+        durability = randomFrom(Durability.values());
+        client(clusterManagerName).admin()
+            .indices()
+            .updateSettings(
+                new UpdateSettingsRequest(INDEX_NAME).settings(
+                    Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability)
+                )
+            )
+            .get();
+        assertEquals(durability, indexShard.indexSettings().getTranslogDurability());
+    }
+
+    public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws ExecutionException, InterruptedException {
+        String expectedExceptionMsg =
+            "index setting [index.translog.durability=async] is not allowed as cluster setting [cluster.remote_store.index.restrict.async-durability=true]";
+        String clusterManagerName = internalCluster().startClusterManagerOnlyNode(
+            Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build()
+        );
+        String dataNode = internalCluster().startDataOnlyNodes(1).get(0);
+
+        // Case 1 - Test create index fails
+        Settings indexSettings = Settings.builder()
+            .put(indexSettings())
+            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Durability.ASYNC)
+            .build();
+        IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings));
+        assertEquals(expectedExceptionMsg, exception.getMessage());
+
+        // Case 2 - Test update index fails
+        createIndex(INDEX_NAME);
+        IndexShard indexShard = getIndexShard(dataNode);
+        assertEquals(Durability.REQUEST, indexShard.indexSettings().getTranslogDurability());
+        exception = assertThrows(
+            IllegalArgumentException.class,
+            () -> client(clusterManagerName).admin()
+                .indices()
+                .updateSettings(new UpdateSettingsRequest(INDEX_NAME).settings(indexSettings))
+                .actionGet()
+        );
+        assertEquals(expectedExceptionMsg, exception.getMessage());
+    }
+
     private IndexShard getIndexShard(String dataNode) throws ExecutionException, InterruptedException {
         String clusterManagerName = internalCluster().getClusterManagerName();
         IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode);
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java
index 21f48ba99e651..98fab139f4902 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java
@@ -46,8 +46,26 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList());
     }
 
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal))
+            .put(
+                remoteStoreClusterSettings(
+                    REPOSITORY_NAME,
+                    segmentRepoPath,
+                    MockFsRepositoryPlugin.TYPE,
+                    REPOSITORY_2_NAME,
+                    translogRepoPath,
+                    MockFsRepositoryPlugin.TYPE
+                )
+            )
+            .build();
+    }
+
     @Before
     public void setup() {
+        clusterSettingsSuppliedByTest = true;
         overrideBuildRepositoryMetadata = false;
         repositoryLocation = randomRepoPath();
         compress = randomBoolean();
@@ -86,6 +104,7 @@ public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String nam
         } else {
             return super.buildRepositoryMetadata(node, name);
         }
+
     }
 
     public void testRateLimitedRemoteUploads() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java
index 6fafdb0912470..5207dab83f1d9 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java
@@ -16,6 +16,7 @@
 import org.opensearch.action.get.MultiGetRequest;
 import org.opensearch.action.get.MultiGetResponse;
 import org.opensearch.action.index.IndexRequestBuilder;
+import org.opensearch.action.search.SearchPhaseName;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodes;
@@ -56,9 +57,11 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import static org.opensearch.action.search.TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED_KEY;
 import static org.opensearch.search.aggregations.AggregationBuilders.terms;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 3)
@@ -74,6 +77,7 @@ public void testSearchWithWRRShardRouting() throws IOException {
             .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone" + ".values", "a,b,c")
             .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone")
             .put("cluster.routing.weighted.fail_open", false)
+            .put(SEARCH_REQUEST_STATS_ENABLED_KEY, true)
.build(); logger.info("--> starting 6 nodes on different zones"); @@ -180,12 +184,39 @@ public void testSearchWithWRRShardRouting() throws IOException { assertFalse(!hitNodes.contains(nodeId)); } nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet(); + int num = 0; + int coordNumber = 0; for (NodeStats stat : nodeStats.getNodes()) { SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); + if (searchStats.getRequestStatsLongHolder() + .getRequestStatsHolder() + .get(SearchPhaseName.QUERY.getName()) + .getTimeInMillis() > 0) { + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTotal(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTimeInMillis(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTotal(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.EXPAND.getName()).getTotal(), + greaterThan(0L) + ); + coordNumber += 1; + } Assert.assertTrue(searchStats.getQueryCount() > 0L); Assert.assertTrue(searchStats.getFetchCount() > 0L); + num++; } + assertThat(coordNumber, greaterThan(0)); + assertThat(num, greaterThan(0)); } private Map> setupCluster(int nodeCountPerAZ, Settings commonSettings) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java new file mode 100644 index 0000000000000..719b75079da92 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java @@ -0,0 +1,608 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.search.simple; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest.RefreshPolicy; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.rescore.QueryRescorerBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.index.query.QueryBuilders.boolQuery; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.index.query.QueryBuilders.rangeQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class ParameterizedSimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { + + public ParameterizedSimpleSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + + public void testSearchNullIndex() { + expectThrows( + NullPointerException.class, + () -> client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() + ); + + expectThrows( + NullPointerException.class, + () -> client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() + ); + + } + + public void testSearchRandomPreference() throws InterruptedException, ExecutionException { + createIndex("test"); + indexRandom( + true, + 
client().prepareIndex("test").setId("1").setSource("field", "value"), + client().prepareIndex("test").setId("2").setSource("field", "value"), + client().prepareIndex("test").setId("3").setSource("field", "value"), + client().prepareIndex("test").setId("4").setSource("field", "value"), + client().prepareIndex("test").setId("5").setSource("field", "value"), + client().prepareIndex("test").setId("6").setSource("field", "value") + ); + + int iters = scaledRandomIntBetween(10, 20); + for (int i = 0; i < iters; i++) { + String randomPreference = randomUnicodeOfLengthBetween(0, 4); + // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary) + while (randomPreference.startsWith("_")) { + randomPreference = randomUnicodeOfLengthBetween(0, 4); + } + // id is not indexed, but lets see that we automatically convert to + SearchResponse searchResponse = client().prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .setPreference(randomPreference) + .get(); + assertHitCount(searchResponse, 6L); + + } + } + + public void testSimpleIp() throws Exception { + createIndex("test"); + + client().admin() + .indices() + .preparePutMapping("test") + .setSource( + XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("from") + .field("type", "ip") + .endObject() + .startObject("to") + .field("type", "ip") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + + client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); + + SearchResponse search = client().prepareSearch() + .setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))) + .get(); + + assertHitCount(search, 1L); + } + + public void testIpCidr() throws Exception { + createIndex("test"); + + client().admin() + .indices() + .preparePutMapping("test") + .setSource( + XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("ip") + .field("type", "ip") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + ensureGreen(); + + client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); + client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); + client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); + client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); + client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); + refresh(); + + SearchResponse search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))).get(); + assertHitCount(search, 3L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))).get(); + assertHitCount(search, 4L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", 
"0.0.0.0/0"))).get(); + assertHitCount(search, 4L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))).get(); + assertHitCount(search, 5L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))).get(); + assertHitCount(search, 0L); + + assertFailures( + client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))), + RestStatus.BAD_REQUEST, + containsString("Expected [ip/prefix] but was [0/0/0/0/0]") + ); + } + + public void testSimpleId() { + createIndex("test"); + + client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + // id is not indexed, but lets see that we automatically convert to + SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).get(); + assertHitCount(searchResponse, 1L); + + searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).get(); + assertHitCount(searchResponse, 1L); + } + + public void testSimpleDateRange() throws Exception { + createIndex("test"); + client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); + client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); + ensureGreen(); + refresh(); + SearchResponse searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 1L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 0L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 2010-01-04||+2d/d]")) + .get(); + assertHitCount(searchResponse, 2L); + } + + // TODO: combine this test with SimpleSearchIT.testSimpleTerminateAfterCount after + // https://github.com/opensearch-project/OpenSearch/issues/8371 + public void testSimpleTerminateAfterCountWithSizeAndTrackHits() throws Exception { + prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); + ensureGreen(); + int numDocs = randomIntBetween(15, 29); + List docbuilders = new ArrayList<>(numDocs); + + for (int i = 1; i <= numDocs; i++) { + String id = String.valueOf(i); + docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); + } + + indexRandom(true, docbuilders); + ensureGreen(); + refresh(); + + 
SearchResponse searchResponse; + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.matchAllQuery()) + .setTerminateAfter(numDocs) + .setSize(0) + .setTrackTotalHits(true) + .get(); + assertEquals(0, searchResponse.getFailedShards()); + } + + public void testSimpleIndexSortEarlyTerminate() throws Exception { + prepareCreate("test").setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") + ).setMapping("rank", "type=integer").get(); + ensureGreen(); + int max = randomIntBetween(3, 29); + List docbuilders = new ArrayList<>(max); + + for (int i = max - 1; i >= 0; i--) { + String id = String.valueOf(i); + docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); + } + + indexRandom(true, docbuilders); + ensureGreen(); + refresh(); + + SearchResponse searchResponse; + for (int i = 1; i < max; i++) { + searchResponse = client().prepareSearch("test") + .addDocValueField("rank") + .setTrackTotalHits(false) + .addSort("rank", SortOrder.ASC) + .setSize(i) + .get(); + assertNull(searchResponse.getHits().getTotalHits()); + for (int j = 0; j < i; j++) { + assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); + } + } + } + + public void testInsaneFromAndSize() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE)); + assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE)); + } + + public void testTooLargeFromAndSize() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); + assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); + assertWindowFails( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + ); + } + + public void testLargeFromAndSizeSucceeds() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1) + .get(), + 1 + ); + } + + public void testTooLargeFromAndSizeOkBySetting() throws Exception { + prepareCreate("idx").setSettings( + Settings.builder() + .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) + ).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); + assertHitCount( + 
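+            // Doubling index.max_result_window at index-creation time legitimizes from/size
+            // combinations that assertWindowFails rejects under the default limit, including the
+            // size + from pairing checked here.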
client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .get(), + 1 + ); + } + + public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings( + Settings.builder() + .put( + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2 + ) + ) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .get(), + 1 + ); + } + + public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { + prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) + .get(), + 1 + ); + } + + public void testTooLargeRescoreWindow() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertRescoreWindowFails(Integer.MAX_VALUE); + assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); + } + + public void testTooLargeRescoreOkBySetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) + .get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + prepareCreate("idx").setSettings( + Settings.builder() + .put( + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), // Note that this is the RESULT window. 
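+                    // Raising only the result window is the point of this test: it verifies that
+                    // the rescore window validation also accepts windows within the (raised)
+                    // result window limit, so windowSize(defaultMaxWindow + 1) below should succeed.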
+ defaultMaxWindow * 2 + ) + ).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByDynamicSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings( + // Note that this is the RESULT window + Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2) + ) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testQueryNumericFieldWithRegex() throws Exception { + assertAcked(prepareCreate("idx").setMapping("num", "type=integer")); + ensureGreen("idx"); + + try { + client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get(); + fail("SearchPhaseExecutionException should have been thrown"); + } catch (SearchPhaseExecutionException ex) { + assertThat(ex.getRootCause().getMessage(), containsString("Can only use regexp queries on keyword and text fields")); + } + } + + public void testTermQueryBigInt() throws Exception { + prepareCreate("idx").setMapping("field", "type=keyword").get(); + ensureGreen("idx"); + + client().prepareIndex("idx") + .setId("1") + .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); + + String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; + XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); + parser.nextToken(); + TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); + assertEquals(1, searchResponse.getHits().getTotalHits().value); + } + + public void testTooLongRegexInRegexpQuery() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); + StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); + while (regexp.length() <= defaultMaxRegexLength) { + regexp.append("]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\"); + } + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", 
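+            // The loop above grows the pattern just past index.max_regex_length, so executing this
+            // regexp query must fail shard execution with the length-limit message asserted below.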
regexp.toString())).get() + ); + assertThat( + e.getRootCause().getMessage(), + containsString( + "The length of regex [" + + regexp.length() + + "] used in the Regexp Query request has exceeded " + + "the allowed maximum of [" + + defaultMaxRegexLength + + "]. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() + + "] index level setting." + ) + ); + } + + private void assertWindowFails(SearchRequestBuilder search) { + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + assertThat( + e.toString(), + containsString( + "Result window is too large, from + size must be less than or equal to: [" + + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + ) + ); + assertThat(e.toString(), containsString("See the scroll api for a more efficient way to request large data sets")); + } + + private void assertRescoreWindowFails(int windowSize) { + SearchRequestBuilder search = client().prepareSearch("idx") + .addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + assertThat( + e.toString(), + containsString( + "Rescore window [" + + windowSize + + "] is too large. It must " + + "be less than [" + + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + ) + ); + assertThat( + e.toString(), + containsString( + "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting." + ) + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java index 0e6073ad11689..67e460653245e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java @@ -6,258 +6,24 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - package org.opensearch.search.simple; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.search.SearchPhaseExecutionException; -import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.search.rescore.QueryRescorerBuilder; -import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.ExecutionException; -import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.index.query.QueryBuilders.boolQuery; -import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.index.query.QueryBuilders.queryStringQuery; -import static org.opensearch.index.query.QueryBuilders.rangeQuery; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; public class SimpleSearchIT extends OpenSearchIntegTestCase { - public void testSearchNullIndex() { - expectThrows( - NullPointerException.class, - () -> client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - - expectThrows( - NullPointerException.class, - () -> client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - - } - - public void testSearchRandomPreference() throws InterruptedException, ExecutionException { - createIndex("test"); - indexRandom( - true, - client().prepareIndex("test").setId("1").setSource("field", "value"), - client().prepareIndex("test").setId("2").setSource("field", "value"), - client().prepareIndex("test").setId("3").setSource("field", "value"), - client().prepareIndex("test").setId("4").setSource("field", "value"), - client().prepareIndex("test").setId("5").setSource("field", "value"), - client().prepareIndex("test").setId("6").setSource("field", "value") - ); - - int iters = scaledRandomIntBetween(10, 20); - for (int i = 0; i < iters; i++) { - String randomPreference = randomUnicodeOfLengthBetween(0, 4); - // randomPreference should not start with '_' (reserved for known preference types (e.g. 
_shards, _primary) - while (randomPreference.startsWith("_")) { - randomPreference = randomUnicodeOfLengthBetween(0, 4); - } - // id is not indexed, but lets see that we automatically convert to - SearchResponse searchResponse = client().prepareSearch() - .setQuery(QueryBuilders.matchAllQuery()) - .setPreference(randomPreference) - .get(); - assertHitCount(searchResponse, 6L); - - } - } - - public void testSimpleIp() throws Exception { - createIndex("test"); - - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("from") - .field("type", "ip") - .endObject() - .startObject("to") - .field("type", "ip") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .get(); - - client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); - - SearchResponse search = client().prepareSearch() - .setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))) - .get(); - - assertHitCount(search, 1L); - } - - public void testIpCidr() throws Exception { - createIndex("test"); - - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("ip") - .field("type", "ip") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .get(); - ensureGreen(); - - client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); - client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); - client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); - client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); - client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); - refresh(); - - SearchResponse search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))).get(); - assertHitCount(search, 3L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))).get(); - assertHitCount(search, 4L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))).get(); - assertHitCount(search, 4L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))).get(); - assertHitCount(search, 5L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))).get(); - assertHitCount(search, 0L); - - assertFailures( - 
client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))), - RestStatus.BAD_REQUEST, - containsString("Expected [ip/prefix] but was [0/0/0/0/0]") - ); - } - - public void testSimpleId() { - createIndex("test"); - - client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - // id is not indexed, but lets see that we automatically convert to - SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).get(); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).get(); - assertHitCount(searchResponse, 1L); - } - - public void testSimpleDateRange() throws Exception { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); - client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); - ensureGreen(); - refresh(); - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 2010-01-04||+2d/d]")) - .get(); - assertHitCount(searchResponse, 2L); - } - + // TODO: Move this test to ParameterizedSimpleSearchIT after https://github.com/opensearch-project/OpenSearch/issues/8371 public void testSimpleTerminateAfterCount() throws Exception { prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen(); @@ -291,302 +57,4 @@ public void testSimpleTerminateAfterCount() throws Exception { assertHitCount(searchResponse, max); assertFalse(searchResponse.isTerminatedEarly()); } - - public void testSimpleIndexSortEarlyTerminate() throws Exception { - prepareCreate("test").setSettings( - Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") - ).setMapping("rank", "type=integer").get(); - ensureGreen(); - int max = randomIntBetween(3, 29); - List docbuilders = new ArrayList<>(max); - - for (int i = max - 1; i >= 0; i--) { - String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); - } - - indexRandom(true, docbuilders); - ensureGreen(); - refresh(); - - SearchResponse searchResponse; - for (int i = 1; i < max; i++) { - searchResponse = client().prepareSearch("test") - .addDocValueField("rank") - .setTrackTotalHits(false) - .addSort("rank", SortOrder.ASC) - .setSize(i) - .get(); - assertNull(searchResponse.getHits().getTotalHits()); - for 
(int j = 0; j < i; j++) { - assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); - } - } - } - - public void testInsaneFromAndSize() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE)); - assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE)); - } - - public void testTooLargeFromAndSize() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); - assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); - assertWindowFails( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - ); - } - - public void testLargeFromAndSizeSucceeds() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeOkBySetting() throws Exception { - prepareCreate("idx").setSettings( - Settings.builder() - .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) - ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings( - Settings.builder() - .put( - IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), - IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2 - ) - ) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .get(), - 1 - ); - } - - public void 
testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { - prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .get(), - 1 - ); - } - - public void testTooLargeRescoreWindow() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertRescoreWindowFails(Integer.MAX_VALUE); - assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); - } - - public void testTooLargeRescoreOkBySetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) - .get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - prepareCreate("idx").setSettings( - Settings.builder() - .put( - IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), // Note that this is the RESULT window. 
- defaultMaxWindow * 2 - ) - ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByDynamicSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings( - // Note that this is the RESULT window - Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2) - ) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testQueryNumericFieldWithRegex() throws Exception { - assertAcked(prepareCreate("idx").setMapping("num", "type=integer")); - ensureGreen("idx"); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get(); - fail("SearchPhaseExecutionException should have been thrown"); - } catch (SearchPhaseExecutionException ex) { - assertThat(ex.getRootCause().getMessage(), containsString("Can only use regexp queries on keyword and text fields")); - } - } - - public void testTermQueryBigInt() throws Exception { - prepareCreate("idx").setMapping("field", "type=keyword").get(); - ensureGreen("idx"); - - client().prepareIndex("idx") - .setId("1") - .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; - XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); - parser.nextToken(); - TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); - assertEquals(1, searchResponse.getHits().getTotalHits().value); - } - - public void testTooLongRegexInRegexpQuery() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); - StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); - while (regexp.length() <= defaultMaxRegexLength) { - regexp.append("]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\"); - } - SearchPhaseExecutionException e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", 
regexp.toString())).get() - ); - assertThat( - e.getRootCause().getMessage(), - containsString( - "The length of regex [" - + regexp.length() - + "] used in the Regexp Query request has exceeded " - + "the allowed maximum of [" - + defaultMaxRegexLength - + "]. " - + "This maximum can be set by changing the [" - + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() - + "] index level setting." - ) - ); - } - - private void assertWindowFails(SearchRequestBuilder search) { - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); - assertThat( - e.toString(), - containsString( - "Result window is too large, from + size must be less than or equal to: [" - + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - ) - ); - assertThat(e.toString(), containsString("See the scroll api for a more efficient way to request large data sets")); - } - - private void assertRescoreWindowFails(int windowSize) { - SearchRequestBuilder search = client().prepareSearch("idx") - .addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); - assertThat( - e.toString(), - containsString( - "Rescore window [" - + windowSize - + "] is too large. It must " - + "be less than [" - + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) - ) - ); - assertThat( - e.toString(), - containsString( - "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting." - ) - ); - } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java index 23d48b173a3db..253a8b2b14824 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java @@ -37,6 +37,7 @@ import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.search.SearchPhaseName; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; @@ -63,6 +64,7 @@ import java.util.Set; import java.util.function.Function; +import static org.opensearch.action.search.TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED_KEY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; @@ -78,7 +80,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -@OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 2) public class SearchStatsIT extends ParameterizedOpenSearchIntegTestCase { public SearchStatsIT(Settings dynamicSettings) { @@ -126,6 +128,11 @@ public void testSimpleStats() throws Exception { assertThat(numNodes, greaterThanOrEqualTo(2)); final int shardsIdx1 = randomIntBetween(1, 10); // we make sure each node gets at least a single shard... 
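        // Coordinator-level request stats are gated behind the dynamic SEARCH_REQUEST_STATS_ENABLED
        // setting (default false), so the test opts in below before issuing any searches; the switch
        // to Scope.TEST above plausibly keeps the persistent setting and the accumulated phase stats
        // from leaking between tests.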
final int shardsIdx2 = Math.max(numNodes - shardsIdx1, randomIntBetween(1, 10)); + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(SEARCH_REQUEST_STATS_ENABLED_KEY, true).build()) + .get(); assertThat(numNodes, lessThanOrEqualTo(shardsIdx1 + shardsIdx2)); assertAcked( prepareCreate("test1").setSettings( @@ -188,20 +195,40 @@ public void testSimpleStats() throws Exception { Set nodeIdsWithIndex = nodeIdsWithIndex("test1", "test2"); int num = 0; + int numOfCoordinators = 0; + for (NodeStats stat : nodeStats.getNodes()) { Stats total = stat.getIndices().getSearch().getTotal(); + if (total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTimeInMillis() > 0) { + assertThat( + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTimeInMillis(), + greaterThan(0L) + ); + assertEquals( + iters, + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTotal() + ); + assertEquals( + iters, + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.EXPAND.getName()).getTotal() + ); + assertEquals( + iters, + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTotal() + ); + numOfCoordinators += 1; + } if (nodeIdsWithIndex.contains(stat.getNode().getId())) { assertThat(total.getQueryCount(), greaterThan(0L)); assertThat(total.getQueryTimeInMillis(), greaterThan(0L)); num++; } else { - assertThat(total.getQueryCount(), equalTo(0L)); + assertThat(total.getQueryCount(), greaterThanOrEqualTo(0L)); assertThat(total.getQueryTimeInMillis(), equalTo(0L)); } } - + assertThat(numOfCoordinators, greaterThan(0)); assertThat(num, greaterThan(0)); - } private Set nodeIdsWithIndex(String... 
indices) { diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index d8b7facc09df2..726ba7ba119af 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -78,7 +78,9 @@ import org.opensearch.index.IndexingPressureService; import org.opensearch.index.VersionType; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.indices.IndexClosedException; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.ingest.IngestService; import org.opensearch.node.NodeClosedException; @@ -129,6 +131,7 @@ public class TransportBulkAction extends HandledTransportAction> entry : requestsByShard.entrySet()) { final ShardId shardId = entry.getKey(); final List requests = entry.getValue(); @@ -632,8 +650,11 @@ public void onResponse(BulkShardResponse bulkShardResponse) { if (bulkItemResponse.getResponse() != null) { bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); } + + docStatusStats.inc(bulkItemResponse.status()); responses.set(bulkItemResponse.getItemId(), bulkItemResponse); } + if (counter.decrementAndGet() == 0) { finishHim(); } @@ -644,22 +665,24 @@ public void onFailure(Exception e) { // create failures for all relevant requests for (BulkItemRequest request : requests) { final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); - DocWriteRequest docWriteRequest = request.request(); - responses.set( + final DocWriteRequest docWriteRequest = request.request(); + final BulkItemResponse bulkItemResponse = new BulkItemResponse( request.id(), - new BulkItemResponse( - request.id(), - docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) - ) + docWriteRequest.opType(), + new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) ); + + docStatusStats.inc(bulkItemResponse.status()); + responses.set(request.id(), bulkItemResponse); } + if (counter.decrementAndGet() == 0) { finishHim(); } } private void finishHim() { + indicesService.addDocStatusStats(docStatusStats); listener.onResponse( new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) ); @@ -766,6 +789,10 @@ void executeBulk( final AtomicArray responses, Map indicesThatCannotBeCreated ) { + /* + * We are not wrapping the listener here to capture the response codes for performance benefits. 
It will + * be saving us an iteration over the responses array + */ new BulkOperation(task, bulkRequest, listener, responses, startTimeNanos, indicesThatCannotBeCreated).run(); } diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index ee8aa10577956..1c0a1280ad550 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -65,6 +65,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; @@ -107,7 +108,6 @@ abstract class AbstractSearchAsyncAction exten private final AtomicInteger skippedOps = new AtomicInteger(); private final TransportSearchAction.SearchTimeProvider timeProvider; private final SearchResponse.Clusters clusters; - protected final GroupShardsIterator toSkipShardsIts; protected final GroupShardsIterator shardsIts; private final int expectedTotalOps; @@ -116,8 +116,12 @@ abstract class AbstractSearchAsyncAction exten private final Map pendingExecutionsPerNode = new ConcurrentHashMap<>(); private final boolean throttleConcurrentRequests; + private SearchPhase currentPhase; + private final List releasables = new ArrayList<>(); + private Optional searchRequestOperationsListener; + AbstractSearchAsyncAction( String name, Logger logger, @@ -135,7 +139,8 @@ abstract class AbstractSearchAsyncAction exten SearchTask task, SearchPhaseResults resultConsumer, int maxConcurrentRequestsPerNode, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { super(name); final List toSkipIterators = new ArrayList<>(); @@ -171,6 +176,7 @@ abstract class AbstractSearchAsyncAction exten this.indexRoutings = indexRoutings; this.results = resultConsumer; this.clusters = clusters; + this.searchRequestOperationsListener = Optional.ofNullable(searchRequestOperationsListener); } @Override @@ -371,6 +377,7 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha : OpenSearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; logger.debug(() -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause); onPhaseFailure(currentPhase, "all shards failed", cause); + } else { Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; @@ -419,13 +426,24 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha clusterState.version() ); } + onPhaseEnd(); executePhase(nextPhase); } } + private void onPhaseEnd() { + this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> { searchRequestOperations.onPhaseEnd(this); }); + } + + private void onPhaseStart(SearchPhase phase) { + setCurrentPhase(phase); + this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> { searchRequestOperations.onPhaseStart(this); }); + } + private void executePhase(SearchPhase phase) { try { - phase.run(); + onPhaseStart(phase); + phase.recordAndRun(); } catch (Exception e) { if (logger.isDebugEnabled()) { logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); @@ -603,6 
+621,14 @@ private void successfulShardExecution(SearchShardIterator shardsIt) { } } + public SearchPhase getCurrentPhase() { + return currentPhase; + } + + private void setCurrentPhase(SearchPhase phase) { + currentPhase = phase; + } + @Override public final int getNumShards() { return results.getNumShards(); @@ -670,10 +696,13 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At } listener.onResponse(buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId)); } + onPhaseEnd(); + setCurrentPhase(null); } @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { + this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> searchRequestOperations.onPhaseFailure(this)); raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); } diff --git a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java index 6c3ee652de2de..ae481736ad0aa 100644 --- a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java @@ -90,7 +90,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction, SearchPhase> phaseFactory, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { // We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests super( @@ -110,7 +111,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction(shardsIts.size()), request.getMaxConcurrentShardRequests(), - clusters + clusters, + searchRequestOperationsListener ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; this.searchPhaseController = searchPhaseController; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhase.java b/server/src/main/java/org/opensearch/action/search/SearchPhase.java index 50b0cd8e01c1d..1c7b3c1f1563c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhase.java @@ -42,13 +42,23 @@ * * @opensearch.internal */ -abstract class SearchPhase implements CheckedRunnable { +public abstract class SearchPhase implements CheckedRunnable { private final String name; + private long startTimeInNanos; protected SearchPhase(String name) { this.name = Objects.requireNonNull(name, "name must not be null"); } + public long getStartTimeInNanos() { + return startTimeInNanos; + } + + public void recordAndRun() throws IOException { + this.startTimeInNanos = System.nanoTime(); + run(); + } + /** * Returns the phases name. */ diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java index 4ffd5521793f6..45d39a6f85ea2 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java @@ -73,6 +73,8 @@ public interface SearchPhaseContext extends Executor { */ SearchRequest getRequest(); + SearchPhase getCurrentPhase(); + /** * Builds and sends the final search response back to the user. 
* diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java index b6f842cf2cce1..4c0fe3ac06326 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java @@ -13,6 +13,7 @@ * @opensearch.internal */ public enum SearchPhaseName { + DFS_PRE_QUERY("dfs_pre_query"), QUERY("query"), FETCH("fetch"), DFS_QUERY("dfs_query"), diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java index f75ab2554e693..ca5ad087d3089 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -81,10 +81,11 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction listeners; + private final Logger logger; + + public CompositeListener(List listeners, Logger logger) { + this.listeners = listeners; + this.logger = logger; + } + + @Override + public void onPhaseStart(SearchPhaseContext context) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseStart(context); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseStart listener [{}] failed", listener), e); + } + } + } + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseEnd(context); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseEnd listener [{}] failed", listener), e); + } + } + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseFailure(context); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseFailure listener [{}] failed", listener), e); + } + } + } + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java new file mode 100644 index 0000000000000..ad299c11b987d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.common.inject.Inject; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.metrics.MeanMetric; + +import java.util.EnumMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Request level search stats to track coordinator level node search latencies + * + * @opensearch.internal + */ +public final class SearchRequestStats implements SearchRequestOperationsListener { + Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + + @Inject + public SearchRequestStats() { + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseStatsMap.put(searchPhaseName, new StatsHolder()); + } + } + + public long getPhaseCurrent(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).current.count(); + } + + public long getPhaseTotal(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).total.count(); + } + + public long getPhaseMetric(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).timing.sum(); + } + + @Override + public void onPhaseStart(SearchPhaseContext context) { + phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); + } + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + StatsHolder phaseStats = phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()); + phaseStats.current.dec(); + phaseStats.total.inc(); + phaseStats.timing.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos())); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) { + phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + } + + /** + * Holder of statistics values + * + * @opensearch.internal + */ + + public static final class StatsHolder { + CounterMetric current = new CounterMetric(); + CounterMetric total = new CounterMetric(); + MeanMetric timing = new MeanMetric(); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 25ec0fc57d19f..cff1005beff27 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -67,6 +67,7 @@ import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.CircuitBreakerService; @@ -145,6 +146,14 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting( + SEARCH_REQUEST_STATS_ENABLED_KEY, + false, + Property.Dynamic, + Property.NodeScope + ); + private final NodeClient client; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -157,6 +166,10 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); this.client = client; @@ -185,6 +199,13 @@ public TransportSearchAction( this.indexNameExpressionResolver = indexNameExpressionResolver; this.namedWriteableRegistry = namedWriteableRegistry; this.searchPipelineService = searchPipelineService; + this.isRequestStatsEnabled 
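+        /*
+         * SearchRequestStats (above) keeps one StatsHolder per SearchPhaseName in an EnumMap:
+         * `current` counts in-flight phases, `total` counts completions, and `timing` accumulates
+         * per-phase wall time derived from the start timestamp recorded by SearchPhase.recordAndRun().
+         * A minimal sketch of reading it back once the stats object is injected:
+         *   long queryMillis = searchRequestStats.getPhaseMetric(SearchPhaseName.QUERY);
+         *   long inFlightQueries = searchRequestStats.getPhaseCurrent(SearchPhaseName.QUERY);
+         */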
= clusterService.getClusterSettings().get(SEARCH_REQUEST_STATS_ENABLED); + clusterService.getClusterSettings().addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setIsRequestStatsEnabled); + this.searchRequestStats = searchRequestStats; + } + + private void setIsRequestStatsEnabled(boolean isRequestStatsEnabled) { + this.isRequestStatsEnabled = isRequestStatsEnabled; } private Map buildPerIndexAliasFilter( @@ -311,6 +332,13 @@ public void executeRequest( SinglePhaseSearchAction phaseSearchAction, ActionListener listener ) { + final List searchListenersList = createSearchListenerList(); + final SearchRequestOperationsListener searchRequestOperationsListener; + if (!CollectionUtils.isEmpty(searchListenersList)) { + searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); + } else { + searchRequestOperationsListener = null; + } executeRequest(task, searchRequest, new SearchAsyncActionProvider() { @Override public AbstractSearchAsyncAction asyncSearchAction( @@ -346,7 +374,8 @@ public AbstractSearchAsyncAction asyncSearchAction( task, new ArraySearchPhaseResults<>(shardsIts.size()), searchRequest.getMaxConcurrentShardRequests(), - clusters + clusters, + searchRequestOperationsListener ) { @Override protected void executePhaseOnShard( @@ -916,9 +945,7 @@ private void executeSearch( @Nullable SearchContextId searchContext, SearchAsyncActionProvider searchAsyncActionProvider ) { - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); - // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead // of just for the _search api @@ -968,11 +995,8 @@ private void executeSearch( indexRoutings = routingMap; } final GroupShardsIterator shardIterators = mergeShardsIterators(localShardIterators, remoteShardIterators); - failIfOverShardCountLimit(clusterService, shardIterators.size()); - Map concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState); - // optimize search type for cases where there is only one shard group to search on if (shardIterators.size() == 1) { // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard @@ -1107,6 +1131,14 @@ AbstractSearchAsyncAction asyncSearchAction( ); } + private List createSearchListenerList() { + final List searchListenersList = new ArrayList<>(); + if (isRequestStatsEnabled) { + searchListenersList.add(searchRequestStats); + } + return searchListenersList; + } + private AbstractSearchAsyncAction searchAsyncAction( SearchTask task, SearchRequest searchRequest, @@ -1123,6 +1155,13 @@ private AbstractSearchAsyncAction searchAsyncAction ThreadPool threadPool, SearchResponse.Clusters clusters ) { + final List searchListenersList = createSearchListenerList(); + final SearchRequestOperationsListener searchRequestOperationsListener; + if (!CollectionUtils.isEmpty(searchListenersList)) { + searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); + } else { + searchRequestOperationsListener = null; + } if (preFilter) { return new CanMatchPreFilterSearchPhase( logger, @@ -1162,7 +1201,8 @@ public void run() { } }; }, - clusters + clusters, + searchRequestOperationsListener ); } else { final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( @@ -1192,7 
+1232,8 @@ public void run() { timeProvider, clusterState, task, - clusters + clusters, + searchRequestOperationsListener ); break; case QUERY_THEN_FETCH: @@ -1212,7 +1253,8 @@ public void run() { timeProvider, clusterState, task, - clusters + clusters, + searchRequestOperationsListener ); break; default: diff --git a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java index 95735f71a38e7..819112eb497f6 100644 --- a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java @@ -32,6 +32,7 @@ package org.opensearch.action.update; +import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.action.ActionRunnable; import org.opensearch.action.DocWriteRequest; @@ -62,11 +63,13 @@ import org.opensearch.core.common.io.stream.NotSerializableExceptionWrapper; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.indices.IndicesService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -154,10 +157,13 @@ public static void resolveAndValidateRouting(Metadata metadata, String concreteI @Override protected void doExecute(Task task, final UpdateRequest request, final ActionListener listener) { if (request.isRequireAlias() && (clusterService.state().getMetadata().hasAlias(request.index()) == false)) { - throw new IndexNotFoundException( + IndexNotFoundException e = new IndexNotFoundException( "[" + DocWriteRequest.REQUIRE_ALIAS + "] request flag is [true] and [" + request.index() + "] is not an alias", request.index() ); + + incDocStatusStats(e); + throw e; } // if we don't have a master, we don't have metadata, that's fine, let it find a cluster-manager using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { @@ -193,7 +199,10 @@ public void onFailure(Exception e) { } private void innerExecute(final Task task, final UpdateRequest request, final ActionListener listener) { - super.doExecute(task, request, listener); + super.doExecute(task, request, ActionListener.wrap(listener::onResponse, e -> { + incDocStatusStats(e); + listener.onFailure(e); + })); } @Override @@ -330,7 +339,13 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< shard.noopUpdate(); } } + + DocStatusStats stats = new DocStatusStats(); + stats.inc(RestStatus.OK); + + indicesService.addDocStatusStats(stats); listener.onResponse(update); + break; default: throw new IllegalStateException("Illegal result " + result.getResponseResult()); @@ -361,4 +376,10 @@ private void handleUpdateFailureWithRetry( } listener.onFailure(cause instanceof Exception ? 
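// Illustrative sketch, not part of this patch: how the DocStatusStats wiring
// added to TransportUpdateAction here buckets an operation by its REST
// status; it mirrors the incDocStatusStats(...) helper defined just below.
DocStatusStats stats = new DocStatusStats();
stats.inc(ExceptionsHelper.status(new IndexNotFoundException("my-index"))); // counted as 404 NOT_FOUND
indicesService.addDocStatusStats(stats);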
(Exception) cause : new NotSerializableExceptionWrapper(cause)); } + + private void incDocStatusStats(final Exception e) { + DocStatusStats stats = new DocStatusStats(); + stats.inc(ExceptionsHelper.status(e)); + indicesService.addDocStatusStats(stats); + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index c43353e9e64e0..b2861e566dd4b 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -88,6 +88,7 @@ import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.shard.IndexSettingProvider; +import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexNameException; @@ -922,6 +923,7 @@ static Settings aggregateIndexSettings( validateTranslogRetentionSettings(indexSettings); validateStoreTypeSettings(indexSettings); validateRefreshIntervalSettings(request.settings(), clusterSettings); + validateTranslogDurabilitySettings(request.settings(), clusterSettings, settings); return indexSettings; } @@ -1491,7 +1493,7 @@ public static void validateTranslogRetentionSettings(Settings indexSettings) { /** * Validates {@code index.refresh_interval} is equal or below the {@code cluster.minimum.index.refresh_interval}. * - * @param requestSettings settings passed in during index create request + * @param requestSettings settings passed in during index create/update request * @param clusterSettings cluster setting */ static void validateRefreshIntervalSettings(Settings requestSettings, ClusterSettings clusterSettings) { @@ -1510,4 +1512,27 @@ static void validateRefreshIntervalSettings(Settings requestSettings, ClusterSet ); } } + + /** + * Validates {@code index.translog.durability} is not async if the {@code cluster.remote_store.index.restrict.async-durability} is set to true. 
+ * + * @param requestSettings settings passed in during index create/update request + * @param clusterSettings cluster setting + */ + static void validateTranslogDurabilitySettings(Settings requestSettings, ClusterSettings clusterSettings, Settings settings) { + if (isRemoteStoreAttributePresent(settings) == false + || IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.exists(requestSettings) == false + || clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING) == false) { + return; + } + Translog.Durability durability = IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.get(requestSettings); + if (durability.equals(Translog.Durability.ASYNC)) { + throw new IllegalArgumentException( + "index setting [index.translog.durability=async] is not allowed as cluster setting [" + + IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey() + + "=true]" + ); + } + + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index 524980565fe39..7d4c3512ed757 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -73,6 +73,8 @@ import java.util.Set; import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings; import static org.opensearch.common.settings.AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX; import static org.opensearch.index.IndexSettings.same; @@ -127,7 +129,8 @@ public void updateSettings( .normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX) .build(); - MetadataCreateIndexService.validateRefreshIntervalSettings(normalizedSettings, clusterService.getClusterSettings()); + validateRefreshIntervalSettings(normalizedSettings, clusterService.getClusterSettings()); + validateTranslogDurabilitySettings(normalizedSettings, clusterService.getClusterSettings(), clusterService.getSettings()); Settings.Builder settingsForClosedIndices = Settings.builder(); Settings.Builder settingsForOpenIndices = Settings.builder(); diff --git a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java index 07a0b49df47ff..c64dc6b9e3ae4 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java @@ -12,12 +12,14 @@ import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.DecryptedRangedStreamProvider; import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.action.ActionListener; -import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.nio.file.Path; +import java.io.InputStream; +import java.util.List; +import java.util.stream.Collectors; /** * EncryptedBlobContainer is an encrypted BlobContainer that is backed by a @@ -44,12 +46,17 @@ public void 
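// Illustrative sketch, not part of this patch: the effect of the
// validateTranslogDurabilitySettings(...) check added above. With the new
// cluster setting enabled on a remote-store cluster, an index create/update
// request asking for async translog durability is rejected. Variable names
// are hypothetical.
Settings requestSettings = Settings.builder()
    .put("index.translog.durability", "async")
    .build();
if (clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING)
    && IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.get(requestSettings) == Translog.Durability.ASYNC) {
    throw new IllegalArgumentException("index.translog.durability=async is not allowed");
}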
asyncBlobUpload(WriteContext writeContext, ActionListener comp @Override public void readBlobAsync(String blobName, ActionListener listener) { - throw new UnsupportedOperationException(); - } - - @Override - public void asyncBlobDownload(String blobName, Path fileLocation, ThreadPool threadPool, ActionListener completionListener) { - throw new UnsupportedOperationException(); + try { + final U cryptoContext = cryptoHandler.loadEncryptionMetadata(getEncryptedHeaderContentSupplier(blobName)); + ActionListener decryptingCompletionListener = ActionListener.map( + listener, + readContext -> new DecryptedReadContext<>(readContext, cryptoHandler, cryptoContext) + ); + + blobContainer.readBlobAsync(blobName, decryptingCompletionListener); + } catch (Exception e) { + listener.onFailure(e); + } } @Override @@ -108,4 +115,58 @@ public InputStreamContainer provideStream(int partNumber) throws IOException { } } + + /** + * DecryptedReadContext decrypts the encrypted {@link ReadContext} by acting as a transformation wrapper around + * the encrypted object + * @param Encryption Metadata / CryptoContext for the {@link CryptoHandler} instance + * @param Parsed Encryption Metadata / CryptoContext for the {@link CryptoHandler} instance + */ + static class DecryptedReadContext extends ReadContext { + + private final CryptoHandler cryptoHandler; + private final U cryptoContext; + private Long blobSize; + + public DecryptedReadContext(ReadContext readContext, CryptoHandler cryptoHandler, U cryptoContext) { + super(readContext); + this.cryptoHandler = cryptoHandler; + this.cryptoContext = cryptoContext; + } + + @Override + public long getBlobSize() { + // initializes the value lazily + if (blobSize == null) { + this.blobSize = this.cryptoHandler.estimateDecryptedLength(cryptoContext, super.getBlobSize()); + } + return this.blobSize; + } + + @Override + public List getPartStreams() { + return super.getPartStreams().stream().map(this::decryptInputStreamContainer).collect(Collectors.toList()); + } + + /** + * Transforms an encrypted {@link InputStreamContainer} to a decrypted instance + * @param inputStreamContainer encrypted input stream container instance + * @return decrypted input stream container instance + */ + private InputStreamContainer decryptInputStreamContainer(InputStreamContainer inputStreamContainer) { + long startOfStream = inputStreamContainer.getOffset(); + long endOfStream = startOfStream + inputStreamContainer.getContentLength() - 1; + DecryptedRangedStreamProvider decryptedStreamProvider = cryptoHandler.createDecryptingStreamOfRange( + cryptoContext, + startOfStream, + endOfStream + ); + + long adjustedPos = decryptedStreamProvider.getAdjustedRange()[0]; + long adjustedLength = decryptedStreamProvider.getAdjustedRange()[1] - adjustedPos + 1; + final InputStream decryptedStream = decryptedStreamProvider.getDecryptedStreamProvider() + .apply(inputStreamContainer.getInputStream()); + return new InputStreamContainer(decryptedStream, adjustedLength, adjustedPos); + } + } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java index 475d891ea9336..d0933741339d9 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java @@ -50,7 +50,7 @@ public InputStream readBlob(String blobName) throws IOException { return cryptoHandler.createDecryptingStream(inputStream); } 
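// Illustrative sketch, not part of this patch: the range arithmetic performed
// by decryptInputStreamContainer(...) above. The crypto handler may widen a
// requested byte range to cipher-block boundaries, so the decrypted part's
// offset and length come from the provider's adjusted range rather than from
// the encrypted container. "part" is a hypothetical InputStreamContainer.
long start = part.getOffset();
long end = start + part.getContentLength() - 1; // inclusive end of the encrypted range
DecryptedRangedStreamProvider provider =
    cryptoHandler.createDecryptingStreamOfRange(cryptoContext, start, end);
long adjustedPos = provider.getAdjustedRange()[0];
long adjustedLength = provider.getAdjustedRange()[1] - adjustedPos + 1;
InputStream decrypted = provider.getDecryptedStreamProvider().apply(part.getInputStream());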
- private EncryptedHeaderContentSupplier getEncryptedHeaderContentSupplier(String blobName) { + EncryptedHeaderContentSupplier getEncryptedHeaderContentSupplier(String blobName) { return (start, end) -> { byte[] buffer; int length = (int) (end - start + 1); diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java index 4ba17959f8040..2c305fb03c475 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java @@ -28,6 +28,12 @@ public ReadContext(long blobSize, List partStreams, String this.blobChecksum = blobChecksum; } + public ReadContext(ReadContext readContext) { + this.blobSize = readContext.blobSize; + this.partStreams = readContext.partStreams; + this.blobChecksum = readContext.blobChecksum; + } + public String getBlobChecksum() { return blobChecksum; } diff --git a/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java b/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java index 5c48c1f772ff0..cb181840406a5 100644 --- a/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java @@ -62,4 +62,5 @@ public void dec(long n) { public long count() { return counter.sum(); } + } diff --git a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java index 33f12c8cb42d3..359facdce633b 100644 --- a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java @@ -79,4 +79,5 @@ public void clear() { counter.reset(); sum.reset(); } + } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 74224d66400da..032027384f106 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -373,6 +373,7 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, TransportSearchAction.SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING, + TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, @@ -671,7 +672,8 @@ public void apply(Settings value, Settings current, Settings previous) { // Remote cluster state settings RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, - IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING + IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/discovery/InitializeExtensionRequest.java b/server/src/main/java/org/opensearch/discovery/InitializeExtensionRequest.java index 33cdad3045780..6e9fb8b7201a4 100644 --- a/server/src/main/java/org/opensearch/discovery/InitializeExtensionRequest.java +++ 
b/server/src/main/java/org/opensearch/discovery/InitializeExtensionRequest.java @@ -25,16 +25,19 @@ public class InitializeExtensionRequest extends TransportRequest { private final DiscoveryNode sourceNode; private final DiscoveryExtensionNode extension; + private final String serviceAccountHeader; - public InitializeExtensionRequest(DiscoveryNode sourceNode, DiscoveryExtensionNode extension) { + public InitializeExtensionRequest(DiscoveryNode sourceNode, DiscoveryExtensionNode extension, String serviceAccountHeader) { this.sourceNode = sourceNode; this.extension = extension; + this.serviceAccountHeader = serviceAccountHeader; } public InitializeExtensionRequest(StreamInput in) throws IOException { super(in); sourceNode = new DiscoveryNode(in); extension = new DiscoveryExtensionNode(in); + serviceAccountHeader = in.readString(); } @Override @@ -42,6 +45,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); sourceNode.writeTo(out); extension.writeTo(out); + out.writeString(serviceAccountHeader); } public DiscoveryNode getSourceNode() { @@ -52,6 +56,10 @@ public DiscoveryExtensionNode getExtension() { return extension; } + public String getServiceAccountHeader() { + return serviceAccountHeader; + } + @Override public String toString() { return "InitializeExtensionsRequest{" + "sourceNode=" + sourceNode + ", extension=" + extension + '}'; @@ -62,7 +70,9 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; InitializeExtensionRequest that = (InitializeExtensionRequest) o; - return Objects.equals(sourceNode, that.sourceNode) && Objects.equals(extension, that.extension); + return Objects.equals(sourceNode, that.sourceNode) + && Objects.equals(extension, that.extension) + && Objects.equals(serviceAccountHeader, that.getServiceAccountHeader()); } @Override diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index 61a6e2faa09e0..9f9ba548143c6 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -41,6 +41,7 @@ import org.opensearch.extensions.settings.CustomSettingsRequestHandler; import org.opensearch.extensions.settings.RegisterCustomSettingsRequest; import org.opensearch.identity.IdentityService; +import org.opensearch.identity.tokens.AuthToken; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.TransportException; @@ -101,6 +102,7 @@ public static enum OpenSearchRequestType { private Settings environmentSettings; private AddSettingsUpdateConsumerRequestHandler addSettingsUpdateConsumerRequestHandler; private NodeClient client; + private IdentityService identityService; /** * Instantiate a new ExtensionsManager object to handle requests and responses from extensions. This is called during Node bootstrap. @@ -108,7 +110,7 @@ public static enum OpenSearchRequestType { * @param additionalSettings Additional settings to read in from extension initialization request * @throws IOException If the extensions discovery file is not properly retrieved. 
*/ - public ExtensionsManager(Set<Setting<?>> additionalSettings) throws IOException { + public ExtensionsManager(Set<Setting<?>> additionalSettings, IdentityService identityService) throws IOException { logger.info("ExtensionsManager initialized"); this.initializedExtensions = new HashMap(); this.extensionIdMap = new HashMap(); @@ -123,6 +125,7 @@ public ExtensionsManager(Set<Setting<?>> additionalSettings) throws IOException } this.client = null; this.extensionTransportActionsHandler = null; + this.identityService = identityService; } /** @@ -400,7 +403,7 @@ protected void doRun() throws Exception { transportService.sendRequest( extension, REQUEST_EXTENSION_ACTION_NAME, - new InitializeExtensionRequest(transportService.getLocalNode(), extension), + new InitializeExtensionRequest(transportService.getLocalNode(), extension, issueServiceAccount(extension)), initializeExtensionResponseHandler ); } @@ -443,6 +446,15 @@ TransportResponse handleExtensionRequest(ExtensionRequest extensionRequest) thro } } + /** + * A helper method called during initialization that issues a service account token to each extension + * @param extension The extension to be issued a service account token + */ + private String issueServiceAccount(DiscoveryExtensionNode extension) { + AuthToken serviceAccountToken = identityService.getTokenManager().issueServiceAccountToken(extension.getId()); + return serviceAccountToken.asAuthHeaderValue(); + } + static String getRequestExtensionActionName() { return REQUEST_EXTENSION_ACTION_NAME; } diff --git a/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java index 34aa3ec1367a7..81b1b91b11481 100644 --- a/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java @@ -20,6 +20,7 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.List; import java.util.Optional; import java.util.Set; @@ -31,7 +32,7 @@ public class NoopExtensionsManager extends ExtensionsManager { public NoopExtensionsManager() throws IOException { - super(Set.of()); + super(Set.of(), new IdentityService(Settings.EMPTY, List.of())); } @Override diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index e42ac8daa3b1c..6b26af148b2ea 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -78,6 +78,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; @@ -171,11 +172,12 @@ public void start( // If the cluster UUID loaded from local is unknown (_na_) then fetch the best state from remote // If there is no valid state on remote, continue with initial empty state // If there is a valid state, then restore index metadata using this state + String lastKnownClusterUUID = ClusterState.UNKNOWN_UUID; if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID())) { - String lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( + lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( clusterState.getClusterName().value() ); - if (!ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID)) { + if
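// Illustrative sketch, not part of this patch: the token flow that
// ExtensionsManager.issueServiceAccount(...) above relies on. The identity
// service's TokenManager mints a service-account token for the extension's
// id, and its header value travels inside the InitializeExtensionRequest.
AuthToken token = identityService.getTokenManager().issueServiceAccountToken(extension.getId());
String authHeader = token.asAuthHeaderValue(); // "noopToken" when the NoopTokenManager is installed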
(ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID) == false) { // Load state from remote final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( clusterState, @@ -186,7 +188,7 @@ public void start( clusterState = remoteRestoreResult.getClusterState(); } } - remotePersistedState = new RemotePersistedState(remoteClusterStateService); + remotePersistedState = new RemotePersistedState(remoteClusterStateService, lastKnownClusterUUID); } persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); } else { @@ -647,9 +649,11 @@ public static class RemotePersistedState implements PersistedState { private ClusterState lastAcceptedState; private ClusterMetadataManifest lastAcceptedManifest; private final RemoteClusterStateService remoteClusterStateService; + private String previousClusterUUID; - public RemotePersistedState(final RemoteClusterStateService remoteClusterStateService) { + public RemotePersistedState(final RemoteClusterStateService remoteClusterStateService, final String previousClusterUUID) { this.remoteClusterStateService = remoteClusterStateService; + this.previousClusterUUID = previousClusterUUID; } @Override @@ -674,7 +678,26 @@ public void setLastAcceptedState(ClusterState clusterState) { try { final ClusterMetadataManifest manifest; if (shouldWriteFullClusterState(clusterState)) { - manifest = remoteClusterStateService.writeFullMetadata(clusterState); + if (clusterState.metadata().clusterUUIDCommitted() == true) { + final Optional latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + if (latestManifest.isPresent()) { + // The previous UUID should not change for the current UUID. So fetching the latest manifest + // from remote store and getting the previous UUID. + previousClusterUUID = latestManifest.get().getPreviousClusterUUID(); + } else { + // When the user starts the cluster with remote state disabled but later enables the remote state, + // there will not be any manifest for the current cluster UUID. 
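// Illustrative sketch, not part of this patch: the previous-UUID pointers
// that RemotePersistedState maintains here form a chain which
// createClusterChain(...) (later in this patch) walks from the newest cluster
// UUID back to ClusterState.UNKNOWN_UUID ("_na_"). Sample values are
// hypothetical.
Map<String, String> previousUuidByUuid = Map.of("uuid3", "uuid2", "uuid2", "uuid1", "uuid1", "_na_");
List<String> chain = new ArrayList<>();
String current = "uuid3";
while (current != null && !"_na_".equals(current)) {
    chain.add(current);
    current = previousUuidByUuid.get(current);
}
// chain is now [uuid3, uuid2, uuid1], newest first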
+ logger.error( + "Latest manifest is not present in remote store for cluster UUID: {}", + clusterState.metadata().clusterUUID() + ); + previousClusterUUID = ClusterState.UNKNOWN_UUID; + } + } + manifest = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID); } else { assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == true : "Previous manifest and previous ClusterState are not in sync"; @@ -723,11 +746,19 @@ public void markLastAcceptedStateAsCommitted() { try { assert lastAcceptedState != null : "Last accepted state is not present"; assert lastAcceptedManifest != null : "Last accepted manifest is not present"; + ClusterState clusterState = lastAcceptedState; + if (lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID) == false + && lastAcceptedState.metadata().clusterUUIDCommitted() == false) { + Metadata.Builder metadataBuilder = Metadata.builder(lastAcceptedState.metadata()); + metadataBuilder.clusterUUIDCommitted(true); + clusterState = ClusterState.builder(lastAcceptedState).metadata(metadataBuilder).build(); + } final ClusterMetadataManifest committedManifest = remoteClusterStateService.markLastStateAsCommitted( - lastAcceptedState, + clusterState, lastAcceptedManifest ); lastAcceptedManifest = committedManifest; + lastAcceptedState = clusterState; } catch (Exception e) { handleExceptionOnWrite(e); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index 040c0663efbd9..40b16f3d6323b 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -42,6 +42,7 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { private static final ParseField COMMITTED_FIELD = new ParseField("committed"); private static final ParseField INDICES_FIELD = new ParseField("indices"); private static final ParseField PREVIOUS_CLUSTER_UUID = new ParseField("previous_cluster_uuid"); + private static final ParseField CLUSTER_UUID_COMMITTED = new ParseField("cluster_uuid_committed"); private static long term(Object[] fields) { return (long) fields[0]; @@ -79,6 +80,10 @@ private static String previousClusterUUID(Object[] fields) { return (String) fields[8]; } + private static boolean clusterUUIDCommitted(Object[] fields) { + return (boolean) fields[9]; + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "cluster_metadata_manifest", fields -> new ClusterMetadataManifest( @@ -90,7 +95,8 @@ private static String previousClusterUUID(Object[] fields) { nodeId(fields), committed(fields), indices(fields), - previousClusterUUID(fields) + previousClusterUUID(fields), + clusterUUIDCommitted(fields) ) ); @@ -108,6 +114,7 @@ private static String previousClusterUUID(Object[] fields) { INDICES_FIELD ); PARSER.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_COMMITTED); } private final List indices; @@ -119,6 +126,7 @@ private static String previousClusterUUID(Object[] fields) { private final String nodeId; private final boolean committed; private final String previousClusterUUID; + private final boolean clusterUUIDCommitted; public List getIndices() { return indices; @@ -156,6 +164,10 @@ public String getPreviousClusterUUID() { 
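// Illustrative sketch, not part of this patch: building a manifest with the
// new cluster_uuid_committed flag through the Builder shown below. The
// clusterTerm / stateVersion / clusterUUID setters are assumed from the
// surrounding class (they are elided from this hunk) and the values are
// placeholders.
ClusterMetadataManifest manifest = ClusterMetadataManifest.builder()
    .clusterTerm(1L)
    .stateVersion(2L)
    .clusterUUID("uuid1")
    .previousClusterUUID("_na_")
    .clusterUUIDCommitted(true)
    .build();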
return previousClusterUUID; } + public boolean isClusterUUIDCommitted() { + return clusterUUIDCommitted; + } + public ClusterMetadataManifest( long clusterTerm, long version, @@ -165,7 +177,8 @@ public ClusterMetadataManifest( String nodeId, boolean committed, List indices, - String previousClusterUUID + String previousClusterUUID, + boolean clusterUUIDCommitted ) { this.clusterTerm = clusterTerm; this.stateVersion = version; @@ -176,6 +189,7 @@ public ClusterMetadataManifest( this.committed = committed; this.indices = Collections.unmodifiableList(indices); this.previousClusterUUID = previousClusterUUID; + this.clusterUUIDCommitted = clusterUUIDCommitted; } public ClusterMetadataManifest(StreamInput in) throws IOException { @@ -188,6 +202,7 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.committed = in.readBoolean(); this.indices = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); this.previousClusterUUID = in.readString(); + this.clusterUUIDCommitted = in.readBoolean(); } public static Builder builder() { @@ -215,6 +230,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); builder.field(PREVIOUS_CLUSTER_UUID.getPreferredName(), getPreviousClusterUUID()); + builder.field(CLUSTER_UUID_COMMITTED.getPreferredName(), isClusterUUIDCommitted()); return builder; } @@ -229,6 +245,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(committed); out.writeCollection(indices); out.writeString(previousClusterUUID); + out.writeBoolean(clusterUUIDCommitted); } @Override @@ -248,7 +265,8 @@ public boolean equals(Object o) { && Objects.equals(opensearchVersion, that.opensearchVersion) && Objects.equals(nodeId, that.nodeId) && Objects.equals(committed, that.committed) - && Objects.equals(previousClusterUUID, that.previousClusterUUID); + && Objects.equals(previousClusterUUID, that.previousClusterUUID) + && Objects.equals(clusterUUIDCommitted, that.clusterUUIDCommitted); } @Override @@ -262,7 +280,8 @@ public int hashCode() { opensearchVersion, nodeId, committed, - previousClusterUUID + previousClusterUUID, + clusterUUIDCommitted ); } @@ -291,6 +310,7 @@ public static class Builder { private String nodeId; private String previousClusterUUID; private boolean committed; + private boolean clusterUUIDCommitted; public Builder indices(List indices) { this.indices = indices; @@ -341,6 +361,11 @@ public Builder previousClusterUUID(String previousClusterUUID) { return this; } + public Builder clusterUUIDCommitted(boolean clusterUUIDCommitted) { + this.clusterUUIDCommitted = clusterUUIDCommitted; + return this; + } + public Builder() { indices = new ArrayList<>(); } @@ -355,6 +380,7 @@ public Builder(ClusterMetadataManifest manifest) { this.committed = manifest.committed; this.indices = new ArrayList<>(manifest.indices); this.previousClusterUUID = manifest.previousClusterUUID; + this.clusterUUIDCommitted = manifest.clusterUUIDCommitted; } public ClusterMetadataManifest build() { @@ -367,7 +393,8 @@ public ClusterMetadataManifest build() { nodeId, committed, indices, - previousClusterUUID + previousClusterUUID, + clusterUUIDCommitted ); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index cf750bb11f3f8..dddc5376803a5 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ 
b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -152,20 +152,13 @@ private BlobStoreTransferService getBlobStoreTransferService() { * @return A manifest object which contains the details of uploaded entity metadata. */ @Nullable - public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState) throws IOException { + public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, String previousClusterUUID) throws IOException { final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { logger.error("Local node is not elected cluster manager. Exiting"); return null; } - // should fetch the previous cluster UUID before writing full cluster state. - // Whenever a new election happens, a new leader will be elected and it might have stale previous UUID - final String previousClusterUUID = fetchPreviousClusterUUID( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ); - // any validations before/after upload ? final List<UploadedIndexMetadata> allUploadedIndexMetadata = writeIndexMetadataParallel( clusterState, @@ -436,7 +429,8 @@ private ClusterMetadataManifest uploadManifest( nodeId, committed, uploadedIndexMetadata, - previousClusterUUID + previousClusterUUID, + clusterState.metadata().clusterUUIDCommitted() ); writeMetadataManifest(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), manifest, manifestFileName); return manifest; @@ -582,7 +576,7 @@ public String getLastKnownUUIDFromRemote(String clusterName) { try { Set<String> clusterUUIDs = getAllClusterUUIDs(clusterName); Map<String, ClusterMetadataManifest> latestManifests = getLatestManifestForAllClusterUUIDs(clusterName, clusterUUIDs); - List<String> validChain = createClusterChain(latestManifests); + List<String> validChain = createClusterChain(latestManifests, clusterName); if (validChain.isEmpty()) { return ClusterState.UNKNOWN_UUID; } @@ -623,7 +617,7 @@ private Map<String, ClusterMetadataManifest> getLatestManifestForAllClusterUUIDs * @param manifestsByClusterUUID Map of latest ClusterMetadataManifest for every cluster UUID * @return List of cluster UUIDs. The first element is the most recent cluster UUID in the chain */ - private List<String> createClusterChain(final Map<String, ClusterMetadataManifest> manifestsByClusterUUID) { + private List<String> createClusterChain(final Map<String, ClusterMetadataManifest> manifestsByClusterUUID, final String clusterName) { final Map<String, String> clusterUUIDGraph = manifestsByClusterUUID.values() .stream() .collect(Collectors.toMap(ClusterMetadataManifest::getClusterUUID, ClusterMetadataManifest::getPreviousClusterUUID)); @@ -637,18 +631,29 @@ private List createClusterChain(final Map 1) { - throw new IllegalStateException( - String.format( - Locale.ROOT, - "The system has ended into multiple valid cluster states in the remote store. " - + "Please check their latest manifest to decide which one you want to keep. Valid Cluster UUIDs: - %s", - validClusterUUIDs - ) + // If the valid cluster UUIDs are more than 1, it means there was some race condition where + // more than 2 cluster manager nodes tried to become active cluster manager and published + // 2 cluster UUIDs which followed the same previous UUID. + final Map<String, ClusterMetadataManifest> manifestsByClusterUUIDTrimmed = trimClusterUUIDs( + manifestsByClusterUUID, + validClusterUUIDs, + clusterName ); + if (manifestsByClusterUUID.size() == manifestsByClusterUUIDTrimmed.size()) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "The system has ended into multiple valid cluster states in the remote store. 
" + + "Please check their latest manifest to decide which one you want to keep. Valid Cluster UUIDs: - %s", + validClusterUUIDs + ) + ); + } + return createClusterChain(manifestsByClusterUUIDTrimmed, clusterName); } final List validChain = new ArrayList<>(); String currentUUID = validClusterUUIDs.get(0); - while (!ClusterState.UNKNOWN_UUID.equals(currentUUID)) { + while (currentUUID != null && !ClusterState.UNKNOWN_UUID.equals(currentUUID)) { validChain.add(currentUUID); // Getting the previous cluster UUID of a cluster UUID from the clusterUUID Graph currentUUID = clusterUUIDGraph.get(currentUUID); @@ -656,8 +661,61 @@ private List createClusterChain(final Map trimClusterUUIDs( + final Map latestManifestsByClusterUUID, + final List validClusterUUIDs, + final String clusterName + ) { + final Map trimmedUUIDs = new HashMap<>(latestManifestsByClusterUUID); + for (String clusterUUID : validClusterUUIDs) { + ClusterMetadataManifest currentManifest = trimmedUUIDs.get(clusterUUID); + // Here we compare the manifest of current UUID to that of previous UUID + // In case currentUUID's latest manifest is same as previous UUIDs latest manifest, + // that means it was restored from previousUUID and no IndexMetadata update was performed on it. + if (ClusterState.UNKNOWN_UUID.equals(currentManifest.getPreviousClusterUUID())) { + if (currentManifest.getIndices().isEmpty()) { + trimmedUUIDs.remove(clusterUUID); + } + } else { + ClusterMetadataManifest previousManifest = trimmedUUIDs.get(currentManifest.getPreviousClusterUUID()); + if (isMetadataEqual(currentManifest, previousManifest, clusterName)) { + trimmedUUIDs.remove(clusterUUID); + } + } + } + return trimmedUUIDs; + } + + private boolean isMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { + // todo clusterName can be set as final in the constructor + if (first.getIndices().size() != second.getIndices().size()) { + return false; + } + final Map secondIndices = second.getIndices() + .stream() + .collect(Collectors.toMap(md -> md.getIndexName(), Function.identity())); + for (UploadedIndexMetadata uploadedIndexMetadata : first.getIndices()) { + final IndexMetadata firstIndexMetadata = getIndexMetadata(clusterName, first.getClusterUUID(), uploadedIndexMetadata); + final UploadedIndexMetadata secondUploadedIndexMetadata = secondIndices.get(uploadedIndexMetadata.getIndexName()); + if (secondUploadedIndexMetadata == null) { + return false; + } + final IndexMetadata secondIndexMetadata = getIndexMetadata(clusterName, second.getClusterUUID(), secondUploadedIndexMetadata); + if (firstIndexMetadata.equals(secondIndexMetadata) == false) { + return false; + } + } + return true; + } + private boolean isInvalidClusterUUID(ClusterMetadataManifest manifest) { - return !manifest.isCommitted() && manifest.getIndices().isEmpty(); + return !manifest.isClusterUUIDCommitted(); } /** @@ -729,6 +787,7 @@ public IndexMetadataTransferException(String errorDesc, Throwable cause) { /** * Purges all remote cluster state against provided cluster UUIDs + * * @param clusterName name of the cluster * @param clusterUUIDs clusteUUIDs for which the remote state needs to be purged */ @@ -760,6 +819,7 @@ public void onFailure(Exception e) { /** * Deletes older than last {@code versionsToRetain} manifests. 
Also cleans up unreferenced IndexMetadata associated with older manifests + * * @param clusterName name of the cluster * @param clusterUUID uuid of cluster state to refer to in remote * @param manifestsToRetain no of latest manifest files to keep in remote diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java b/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java index 1255e822cea6e..1dc3a58916b5c 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java @@ -37,6 +37,20 @@ public String asAuthHeaderValue() { }; } + /** + * Issue a new Noop Token + * @return a new Noop Token + */ + @Override + public AuthToken issueServiceAccountToken(final String audience) { + return new AuthToken() { + @Override + public String asAuthHeaderValue() { + return "noopToken"; + } + }; + } + @Override public Subject authenticateToken(AuthToken authToken) { return null; diff --git a/server/src/main/java/org/opensearch/identity/tokens/BasicAuthToken.java b/server/src/main/java/org/opensearch/identity/tokens/BasicAuthToken.java index 71b8fe504a5d1..4ad0bbe67d2a1 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/BasicAuthToken.java +++ b/server/src/main/java/org/opensearch/identity/tokens/BasicAuthToken.java @@ -23,7 +23,7 @@ public final class BasicAuthToken implements AuthToken { public BasicAuthToken(final String headerValue) { final String base64Encoded = headerValue.substring(TOKEN_IDENTIFIER.length()).trim(); - final byte[] rawDecoded = Base64.getDecoder().decode(base64Encoded); + final byte[] rawDecoded = Base64.getUrlDecoder().decode(base64Encoded); final String usernamepassword = new String(rawDecoded, StandardCharsets.UTF_8); final String[] tokenParts = usernamepassword.split(":", 2); diff --git a/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java b/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java index 4f6ddeb48dea3..b4048251a06a2 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java +++ b/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java @@ -23,6 +23,14 @@ public interface TokenManager { */ public AuthToken issueOnBehalfOfToken(final Subject subject, final OnBehalfOfClaims claims); + /** + * Create a new service account token + * + * @param audience: A string representing the unique id of the extension for which a service account token should be generated + * @return a new auth token + */ + public AuthToken issueServiceAccountToken(final String audience); + /** * Authenticates a provided authToken * @param authToken: The authToken to authenticate diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 38456ad3e43a5..ca0cc307e460b 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -481,7 +481,7 @@ public synchronized IndexShard createShard( Store remoteStore = null; if (this.indexSettings.isRemoteStoreEnabled()) { Directory remoteDirectory = remoteDirectoryFactory.newDirectory(this.indexSettings, path); - remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY); + remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY, path); } Directory directory = directoryFactory.newDirectory(this.indexSettings, 
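// Illustrative sketch, not part of this patch: why BasicAuthToken above moved
// from Base64.getDecoder() to Base64.getUrlDecoder(). The URL-safe alphabet
// substitutes '-' and '_' for '+' and '/', so credentials encoded with the
// URL-safe encoder decode only with the URL decoder.
byte[] raw = "admin:secret".getBytes(StandardCharsets.UTF_8);
String urlSafe = Base64.getUrlEncoder().encodeToString(raw);
byte[] decoded = Base64.getUrlDecoder().decode(urlSafe); // accepted
// Base64.getDecoder().decode(...) throws IllegalArgumentException whenever the
// input contains '-' or '_'; the URL decoder likewise rejects '+' and '/'.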
path); @@ -490,7 +490,8 @@ public synchronized IndexShard createShard( this.indexSettings, directory, lock, - new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId)) + new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId)), + path ); eventListener.onStoreCreated(shardId); indexShard = new IndexShard( diff --git a/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java b/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java index 637282374de73..df1666e72f2ee 100644 --- a/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java +++ b/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java @@ -42,7 +42,7 @@ /** * Wrapper around {@link TieredMergePolicy} which doesn't respect - * {@link TieredMergePolicy#setMaxMergedSegmentMB(double)} on forced merges. + * {@link TieredMergePolicy#setMaxMergedSegmentMB(double)} on forced merges, but DOES respect it on only_expunge_deletes. * See https://issues.apache.org/jira/browse/LUCENE-7976. * * @opensearch.internal @@ -71,7 +71,7 @@ public MergeSpecification findForcedMerges( @Override public MergeSpecification findForcedDeletesMerges(SegmentInfos infos, MergeContext mergeContext) throws IOException { - return forcedMergePolicy.findForcedDeletesMerges(infos, mergeContext); + return regularMergePolicy.findForcedDeletesMerges(infos, mergeContext); } public void setForceMergeDeletesPctAllowed(double forceMergeDeletesPctAllowed) { @@ -80,7 +80,7 @@ public void setForceMergeDeletesPctAllowed(double forceMergeDeletesPctAllowed) { } public double getForceMergeDeletesPctAllowed() { - return forcedMergePolicy.getForceMergeDeletesPctAllowed(); + return regularMergePolicy.getForceMergeDeletesPctAllowed(); } public void setFloorSegmentMB(double mbFrac) { diff --git a/server/src/main/java/org/opensearch/index/analysis/Analysis.java b/server/src/main/java/org/opensearch/index/analysis/Analysis.java index f4465c9dffac6..b9a219057f326 100644 --- a/server/src/main/java/org/opensearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/opensearch/index/analysis/Analysis.java @@ -87,6 +87,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.regex.Pattern; import static java.util.Collections.unmodifiableMap; @@ -98,6 +99,9 @@ public class Analysis { private static final Logger LOGGER = LogManager.getLogger(Analysis.class); + // Regular expression to support hashtag tokenization + private static final Pattern HASH_TAG_RULE_PATTERN = Pattern.compile("^\\s*#\\s*=>"); + public static CharArraySet parseStemExclusion(Settings settings, CharArraySet defaultStemExclusion) { String value = settings.get("stem_exclusion"); if ("_none_".equals(value)) { @@ -222,16 +226,6 @@ public static List parseWordList(Environment env, Settings settings, Stri return parseWordList(env, settings, settingPrefix + "_path", settingPrefix, parser); } - public static List parseWordList( - Environment env, - Settings settings, - String settingPrefix, - CustomMappingRuleParser parser, - boolean removeComments - ) { - return parseWordList(env, settings, settingPrefix + "_path", settingPrefix, parser, removeComments); - } - /** * Parses a list of words from the specified settings or from a file, with the given parser. 
* @@ -246,17 +240,6 @@ public static List parseWordList( String settingPath, String settingList, CustomMappingRuleParser parser - ) { - return parseWordList(env, settings, settingPath, settingList, parser, true); - } - - public static List parseWordList( - Environment env, - Settings settings, - String settingPath, - String settingList, - CustomMappingRuleParser parser, - boolean removeComments ) { List words = getWordList(env, settings, settingPath, settingList); if (words == null) { @@ -266,7 +249,7 @@ public static List parseWordList( int lineNum = 0; for (String word : words) { lineNum++; - if (removeComments == false || word.startsWith("#") == false) { + if (word.startsWith("#") == false || HASH_TAG_RULE_PATTERN.matcher(word).find() == true) { try { rules.add(parser.apply(word)); } catch (RuntimeException ex) { diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java index 417cdd5a8f030..c1f69d1ef3638 100644 --- a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java @@ -52,7 +52,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.KnnCollector; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.opensearch.common.util.set.Sets; @@ -271,12 +271,12 @@ public ByteVectorValues getByteVectorValues(String field) throws IOException { } @Override - public TopDocs searchNearestVectors(String field, float[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { + public void searchNearestVectors(String field, byte[] target, KnnCollector k, Bits acceptDocs) throws IOException { throw new UnsupportedOperationException(); } @Override - public TopDocs searchNearestVectors(String field, byte[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { + public void searchNearestVectors(String field, float[] target, KnnCollector k, Bits acceptDocs) throws IOException { throw new UnsupportedOperationException(); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 93928a464b138..ecb965061a828 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -172,9 +172,6 @@ public enum MergeReason { ); private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MapperService.class); - static final String DEFAULT_MAPPING_ERROR_MESSAGE = "[_default_] mappings are not allowed on new indices and should no " - + "longer be used. 
See [https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes-7.0.html" - + "#default-mapping-not-allowed] for more information."; private final IndexAnalyzers indexAnalyzers; diff --git a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java index 65c2dfa9c5a8b..c44a7ef6a397c 100644 --- a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java @@ -427,4 +427,35 @@ private static boolean rewriteClauses( } return changed; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (mustClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.MUST); + for (QueryBuilder mustClause : mustClauses) { + mustClause.visit(subVisitor); + } + } + if (shouldClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.SHOULD); + for (QueryBuilder shouldClause : shouldClauses) { + shouldClause.visit(subVisitor); + } + } + if (mustNotClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.MUST_NOT); + for (QueryBuilder mustNotClause : mustNotClauses) { + mustNotClause.visit(subVisitor); + } + } + if (filterClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.FILTER); + for (QueryBuilder filterClause : filterClauses) { + filterClause.visit(subVisitor); + } + } + + } + } diff --git a/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java index 26124b422f26f..1b52ae2f03605 100644 --- a/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java @@ -33,6 +33,7 @@ package org.opensearch.index.query; import org.apache.lucene.queries.function.FunctionScoreQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -252,4 +253,15 @@ protected void extractInnerHitBuilders(Map inner InnerHitContextBuilder.extractInnerHits(positiveQuery, innerHits); InnerHitContextBuilder.extractInnerHits(negativeQuery, innerHits); } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (positiveQuery != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(positiveQuery); + } + if (negativeQuery != null) { + visitor.getChildVisitor(BooleanClause.Occur.SHOULD).accept(negativeQuery); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java index 6a29ad8a0a401..b2764d29da80a 100644 --- a/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.index.query; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; @@ -183,4 +184,11 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws protected void extractInnerHitBuilders(Map innerHits) { 
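// Illustrative sketch, not part of this patch: traversing a query tree with
// the visit(...) implementations added in this change. This visitor flattens
// every builder it sees into a list; the variable names are hypothetical.
List<QueryBuilder> seen = new ArrayList<>();
QueryBuilderVisitor collector = new QueryBuilderVisitor() {
    @Override
    public void accept(QueryBuilder qb) {
        seen.add(qb);
    }

    @Override
    public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) {
        return this; // reuse the same collector at every depth
    }
};
QueryBuilders.boolQuery()
    .must(QueryBuilders.termQuery("user", "kimchy"))
    .filter(QueryBuilders.rangeQuery("age").gte(10))
    .visit(collector);
// seen now contains the bool builder plus its term and range clauses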
InnerHitContextBuilder.extractInnerHits(filterBuilder, innerHits); } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.FILTER).accept(filterBuilder); + } + } diff --git a/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java index 91f4a02fac6c0..bd8ec62f6c43e 100644 --- a/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.index.query; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.Query; import org.opensearch.common.lucene.search.Queries; @@ -246,4 +247,15 @@ protected void extractInnerHitBuilders(Map inner InnerHitContextBuilder.extractInnerHits(query, innerHits); } } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (queries.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.SHOULD); + for (QueryBuilder subQb : queries) { + subVisitor.accept(subQb); + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java index 09a71795a3f27..1162689a54689 100644 --- a/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.FieldMaskingSpanQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -207,4 +208,10 @@ protected boolean doEquals(FieldMaskingSpanQueryBuilder other) { public String getWriteableName() { return SPAN_FIELD_MASKING_FIELD.getPreferredName(); } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(queryBuilder); + } } diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java index a40ccf427794a..090f74c5be7fe 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java @@ -95,4 +95,13 @@ public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewritea default QueryBuilder rewrite(QueryRewriteContext queryShardContext) throws IOException { return this; } + + /** + * Recurse through the QueryBuilder tree, visiting any child QueryBuilder. + * @param visitor a query builder visitor to be called by each query builder in the tree. 
+ */ + default void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + } + } diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java b/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java new file mode 100644 index 0000000000000..af5a125f9dd95 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.search.BooleanClause; + +/** + * QueryBuilderVisitor is an interface that defines a visitor object used to traverse a QueryBuilder tree. + */ +public interface QueryBuilderVisitor { + + /** + * Accept is called when the visitor encounters a queryBuilder object during traversal of the query tree. + * @param qb the queryBuilder object accepted by the visitor. + */ + void accept(QueryBuilder qb); + + /** + * Fetches a child visitor from this QueryBuilderVisitor object. + * @param occur defines how the child clause's result contributes to the final search result. + * @return a child QueryBuilderVisitor object. + */ + QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur); + + /** + * NO_OP_VISITOR is a default, do-nothing implementation of QueryBuilderVisitor. + * It is used when a caller does not want to implement QueryBuilderVisitor and only needs to pass an empty visitor. + * + */ + QueryBuilderVisitor NO_OP_VISITOR = new QueryBuilderVisitor() { + @Override + public void accept(QueryBuilder qb) { + // Do nothing + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return this; + } + }; + +} diff --git a/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java index ed4f5c6848b06..32a19ea3e9b50 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanContainingQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -188,4 +189,11 @@ protected boolean doEquals(SpanContainingQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(big); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(little); + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java index 7427b13463284..bcbc64ddf386d 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanFirstQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import
org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -186,4 +187,10 @@ protected boolean doEquals(SpanFirstQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(matchBuilder); + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java index ce28391ca478b..96d03c91964e3 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java @@ -33,6 +33,7 @@ import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -213,4 +214,12 @@ protected boolean doEquals(SpanMultiTermQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (multiTermQueryBuilder != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(multiTermQueryBuilder); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java index ba7625d94a5a6..30a1c29c29126 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -299,6 +300,17 @@ public String getWriteableName() { return NAME; } + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (this.clauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.MUST); + for (QueryBuilder subQb : this.clauses) { + subVisitor.accept(subQb); + } + } + } + /** * SpanGapQueryBuilder enables gaps in a SpanNearQuery. 
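For callers, the contract introduced by these `visit` overrides is: `accept(this)` fires for the current node, then each child builder is handed a sub-visitor keyed by its clause's `Occur`. A minimal sketch of a concrete visitor that flattens a query tree into readable entries; the class below is illustrative and not part of the change:

```java
import org.apache.lucene.search.BooleanClause;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilderVisitor;

import java.util.ArrayList;
import java.util.List;

// Collects every builder in the tree together with the Occur context it was
// reached under. getChildVisitor(occur) returns a visitor that remembers the
// clause type, so nested bool/dis_max/span trees are flattened into one list.
public class ClauseCollectingVisitor implements QueryBuilderVisitor {
    private final BooleanClause.Occur occur;
    private final List<String> collected;

    public ClauseCollectingVisitor() {
        this(BooleanClause.Occur.MUST, new ArrayList<>());
    }

    private ClauseCollectingVisitor(BooleanClause.Occur occur, List<String> collected) {
        this.occur = occur;
        this.collected = collected;
    }

    @Override
    public void accept(QueryBuilder qb) {
        collected.add(occur.name() + " " + qb.getName());
    }

    @Override
    public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur childOccur) {
        return new ClauseCollectingVisitor(childOccur, collected);
    }

    public List<String> collected() {
        return collected;
    }
}
```

A call such as `boolQuery.visit(new ClauseCollectingVisitor())` then yields one entry per builder in the tree, while `QueryBuilderVisitor.NO_OP_VISITOR` remains the cheap default when a caller needs no traversal output.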
* Since, SpanGapQuery is private to SpanNearQuery, SpanGapQueryBuilder cannot diff --git a/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java index 98e7f287749f5..59ec5b9d77fc8 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanNotQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -284,4 +285,16 @@ protected boolean doEquals(SpanNotQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (include != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(include); + } + + if (exclude != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST_NOT).accept(exclude); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java index 2f63e6d7403f7..fae1e318c66bd 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -188,4 +189,15 @@ protected boolean doEquals(SpanOrQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (clauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.SHOULD); + for (QueryBuilder subQb : this.clauses) { + subVisitor.accept(subQb); + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java index 5d02cc0026dfd..4d5a6dde61a70 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanQuery; import org.apache.lucene.queries.spans.SpanWithinQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -197,4 +198,11 @@ protected boolean doEquals(SpanWithinQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(big); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(little); + } } diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java index e302ebcee4ba7..fe9ad200d44f0 100644 --- 
a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.index.query.functionscore; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.OpenSearchException; import org.opensearch.common.lucene.search.function.ScriptScoreQuery; @@ -45,6 +46,7 @@ import org.opensearch.index.query.InnerHitContextBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; import org.opensearch.script.ScoreScript; @@ -224,4 +226,12 @@ protected void extractInnerHitBuilders(Map inner InnerHitContextBuilder.extractInnerHits(query(), innerHits); } + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (query != null) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.MUST); + subVisitor.accept(query); + } + } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index d18462cd65fc1..05081180bb179 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -294,6 +294,11 @@ public void updateLatestLocalFileNameLengthMap( Collection segmentFiles, CheckedFunction fileSizeFunction ) { + logger.debug( + "segmentFilesPostRefresh={} latestLocalFileNamesBeforeMapUpdate={}", + segmentFiles, + latestLocalFileNameLengthMap.keySet() + ); // Update the map segmentFiles.stream() .filter(file -> EXCLUDE_FILES.contains(file) == false) diff --git a/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java b/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java index 78b4a5f04c955..d46b34fe97356 100644 --- a/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java +++ b/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java @@ -86,14 +86,6 @@ public void copy(int slot, int doc) throws IOException { super.copy(slot, doc); } - @Override - protected boolean isMissingValueCompetitive() { - int result = missingValue.compareTo(bottom); - // in reverse (desc) sort missingValue is competitive when it's greater or equal to bottom, - // in asc sort missingValue is competitive when it's smaller or equal to bottom - return reverse ? 
(result >= 0) : (result <= 0); - } - @Override protected void encodeBottom(byte[] packedValue) { BigIntegerPoint.encodeDimension(bottom, packedValue, 0); @@ -103,5 +95,15 @@ protected void encodeBottom(byte[] packedValue) { protected void encodeTop(byte[] packedValue) { BigIntegerPoint.encodeDimension(topValue, packedValue, 0); } + + @Override + protected int compareMissingValueWithBottomValue() { + return missingValue.compareTo(bottom); + } + + @Override + protected int compareMissingValueWithTopValue() { + return missingValue.compareTo(topValue); + } } } diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 76b216304b3b7..14aaf7e58a59c 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -33,6 +33,8 @@ package org.opensearch.index.search.stats; import org.opensearch.Version; +import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; @@ -58,10 +60,79 @@ public class SearchStats implements Writeable, ToXContentFragment { /** - * Statistics for search + * Holds statistic values for a particular phase. * * @opensearch.internal */ + public static class PhaseStatsLongHolder implements Writeable { + + long current; + long total; + long timeInMillis; + + public long getCurrent() { + return current; + } + + public long getTotal() { + return total; + } + + public long getTimeInMillis() { + return timeInMillis; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(current); + out.writeVLong(total); + out.writeVLong(timeInMillis); + } + + PhaseStatsLongHolder() { + this(0, 0, 0); + } + + PhaseStatsLongHolder(long current, long total, long timeInMillis) { + this.current = current; + this.total = total; + this.timeInMillis = timeInMillis; + } + + PhaseStatsLongHolder(StreamInput in) throws IOException { + this.current = in.readVLong(); + this.total = in.readVLong(); + this.timeInMillis = in.readVLong(); + } + + } + + /** + * Holds requests stats for different phases. 
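For context on the comparator change just above: the removed `isMissingValueCompetitive` folded the sort direction into the comparison, while the new hooks only report a sign and leave direction handling to the shared comparator superclass (an assumption about that base class, which is not part of this diff). A standalone sketch of the equivalent logic:

```java
import java.math.BigInteger;

// Standalone demo (not OpenSearch code): the sign of compareTo(bottom)
// plus the sort direction decides whether the missing value can still
// compete, exactly as the removed method's comment described.
final class MissingValueDemo {
    static boolean isCompetitive(BigInteger missingValue, BigInteger bottom, boolean reverse) {
        int result = missingValue.compareTo(bottom); // compareMissingValueWithBottomValue()
        // desc (reverse) sort: competitive when missingValue >= bottom;
        // asc sort: competitive when missingValue <= bottom.
        return reverse ? result >= 0 : result <= 0;
    }

    public static void main(String[] args) {
        BigInteger bottom = new BigInteger("18446744073709551615"); // 2^64 - 1
        System.out.println(isCompetitive(BigInteger.ZERO, bottom, false)); // true: asc keeps it
        System.out.println(isCompetitive(BigInteger.ZERO, bottom, true));  // false: desc prunes it
    }
}
```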
+ * + * @opensearch.internal + */ + public static class RequestStatsLongHolder { + + Map requestStatsHolder = new HashMap<>(); + + public Map getRequestStatsHolder() { + return requestStatsHolder; + } + + RequestStatsLongHolder() { + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + requestStatsHolder.put(searchPhaseName.getName(), new PhaseStatsLongHolder()); + } + } + } + + /** + * Holder of statistics values + * + * @opensearch.internal + */ + public static class Stats implements Writeable, ToXContentFragment { private long queryCount; @@ -89,6 +160,13 @@ public static class Stats implements Writeable, ToXContentFragment { private long pitTimeInMillis; private long pitCurrent; + @Nullable + private RequestStatsLongHolder requestStatsLongHolder; + + public RequestStatsLongHolder getRequestStatsLongHolder() { + return requestStatsLongHolder; + } + private Stats() { // for internal use, initializes all counts to 0 } @@ -114,6 +192,7 @@ public Stats( long suggestTimeInMillis, long suggestCurrent ) { + this.requestStatsLongHolder = new RequestStatsLongHolder(); this.queryCount = queryCount; this.queryTimeInMillis = queryTimeInMillis; this.queryCurrent = queryCurrent; @@ -163,6 +242,10 @@ private Stats(StreamInput in) throws IOException { pitCurrent = in.readVLong(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.requestStatsLongHolder = new RequestStatsLongHolder(); + requestStatsLongHolder.requestStatsHolder = in.readMap(StreamInput::readString, PhaseStatsLongHolder::new); + } if (in.getVersion().onOrAfter(Version.V_2_10_0)) { concurrentQueryCount = in.readVLong(); concurrentQueryTimeInMillis = in.readVLong(); @@ -354,6 +437,17 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(pitCurrent); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (requestStatsLongHolder == null) { + requestStatsLongHolder = new RequestStatsLongHolder(); + } + out.writeMap( + requestStatsLongHolder.getRequestStatsHolder(), + StreamOutput::writeString, + (stream, stats) -> stats.writeTo(stream) + ); + } + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeVLong(concurrentQueryCount); out.writeVLong(concurrentQueryTimeInMillis); @@ -391,6 +485,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.SUGGEST_TIME_IN_MILLIS, Fields.SUGGEST_TIME, getSuggestTime()); builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); + if (requestStatsLongHolder != null) { + builder.startObject(Fields.REQUEST); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + PhaseStatsLongHolder statsLongHolder = requestStatsLongHolder.requestStatsHolder.get(searchPhaseName.getName()); + if (statsLongHolder == null) { + continue; + } + builder.startObject(searchPhaseName.getName()); + builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(statsLongHolder.timeInMillis)); + builder.field(Fields.CURRENT, statsLongHolder.current); + builder.field(Fields.TOTAL, statsLongHolder.total); + builder.endObject(); + } + builder.endObject(); + } return builder; } } @@ -405,6 +515,24 @@ public SearchStats() { totalStats = new Stats(); } + // Set the different Request Stats fields in here + public void setSearchRequestStats(SearchRequestStats searchRequestStats) { + if (totalStats.requestStatsLongHolder == null) { + totalStats.requestStatsLongHolder = new RequestStatsLongHolder(); + } + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + 
totalStats.requestStatsLongHolder.requestStatsHolder.put( + searchPhaseName.getName(), + new PhaseStatsLongHolder( + searchRequestStats.getPhaseCurrent(searchPhaseName), + searchRequestStats.getPhaseTotal(searchPhaseName), + searchRequestStats.getPhaseMetric(searchPhaseName) + ) + ); + } + } + public SearchStats(Stats totalStats, long openContexts, @Nullable Map groupStats) { this.totalStats = totalStats; this.openContexts = openContexts; @@ -520,6 +648,12 @@ static final class Fields { static final String SUGGEST_TIME = "suggest_time"; static final String SUGGEST_TIME_IN_MILLIS = "suggest_time_in_millis"; static final String SUGGEST_CURRENT = "suggest_current"; + static final String REQUEST = "request"; + static final String TIME_IN_MILLIS = "time_in_millis"; + static final String TIME = "time"; + static final String CURRENT = "current"; + static final String TOTAL = "total"; + } @Override diff --git a/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java index 9bc105bf13f0a..3ee74e5267718 100644 --- a/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java @@ -104,6 +104,7 @@ protected TimeValue getNextRetryInterval() { private void scheduleRetry(TimeValue interval, String retryThreadPoolName, boolean didRefresh) { // If the underlying listener has closed, then we do not allow even the retry to be scheduled if (closed.get() || isRetryEnabled() == false) { + getLogger().debug("skip retry on closed={} isRetryEnabled={}", closed.get(), isRetryEnabled()); return; } @@ -112,6 +113,7 @@ private void scheduleRetry(TimeValue interval, String retryThreadPoolName, boole // If the retryScheduled is already true, then we return from here itself. If not, then we proceed with scheduling // the retry. if (retryScheduled.getAndSet(true)) { + getLogger().debug("skip retry on retryScheduled=true"); return; } @@ -188,7 +190,7 @@ public final void close() throws IOException { if (semaphore.tryAcquire(TOTAL_PERMITS, 10, TimeUnit.MINUTES)) { boolean result = closed.compareAndSet(false, true); assert result && semaphore.availablePermits() == 0; - getLogger().info("Closed"); + getLogger().info("All permits are acquired and refresh listener is closed"); } else { throw new TimeoutException("timeout while closing gated refresh listener"); } @@ -200,7 +202,6 @@ public final void close() throws IOException { protected abstract Logger getLogger(); // Visible for testing - /** * Returns if the retry is scheduled or not. 
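Returning to the SearchStats changes above, `setSearchRequestStats` is a snapshot operation. A standalone sketch of the same idea, with illustrative names rather than the OpenSearch classes:

```java
import java.util.HashMap;
import java.util.Map;

// Standalone sketch (names illustrative) of what setSearchRequestStats(...)
// does: copy one (current, total, timeInMillis) triple per search phase from
// the live tracker into a serializable holder keyed by phase name.
final class RequestStatsSnapshotDemo {

    static final class PhaseStats {
        final long current, total, timeInMillis;

        PhaseStats(long current, long total, long timeInMillis) {
            this.current = current;
            this.total = total;
            this.timeInMillis = timeInMillis;
        }

        @Override
        public String toString() {
            return "current=" + current + " total=" + total + " timeInMillis=" + timeInMillis;
        }
    }

    // Illustrative phase names; the real keys come from SearchPhaseName.getName().
    static final String[] PHASES = { "query", "fetch", "expand" };

    static Map<String, PhaseStats> snapshot(Map<String, long[]> liveTracker) {
        Map<String, PhaseStats> holder = new HashMap<>();
        for (String phase : PHASES) {
            long[] m = liveTracker.getOrDefault(phase, new long[3]);
            holder.put(phase, new PhaseStats(m[0], m[1], m[2]));
        }
        return holder;
    }

    public static void main(String[] args) {
        Map<String, long[]> tracker = Map.of("query", new long[] { 1L, 42L, 1337L });
        System.out.println(snapshot(tracker).get("query")); // current=1 total=42 timeInMillis=1337
    }
}
```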
* diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 34c5ed2112482..d476e8b7c9288 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -62,6 +62,8 @@ import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.upgrade.post.UpgradeRequest; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.metadata.DataStream; @@ -96,7 +98,6 @@ import org.opensearch.common.util.concurrent.RunOnce; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.util.set.Sets; import org.opensearch.core.Assertions; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.unit.ByteSizeValue; @@ -199,6 +200,7 @@ import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.NoSuchFileException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -854,7 +856,7 @@ public void relocated( synchronized (mutex) { verifyRelocatingState(); replicationTracker.completeRelocationHandoff(); // make changes to primaryMode and relocated flag only under - // mutex + // mutex } } catch (final Exception e) { try { @@ -3506,6 +3508,7 @@ public void startRecovery( // } // }} // } + logger.debug("startRecovery type={}", recoveryState.getRecoverySource().getType()); assert recoveryState.getRecoverySource().equals(shardRouting.recoverySource()); switch (recoveryState.getRecoverySource().getType()) { case EMPTY_STORE: @@ -3790,7 +3793,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro replicationTracker::isPrimaryMode, translogFactorySupplier.apply(indexSettings, shardRouting), isTimeSeriesDescSortOptimizationEnabled() ? DataStream.TIMESERIES_LEAF_SORTER : null // DESC @timestamp default order for - // timeseries + // timeseries ); } @@ -4723,11 +4726,21 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { } /** - * Downloads segments from remote segment store. - * @param overrideLocal flag to override local segment files with those in remote store - * @throws IOException if exception occurs while reading segments from remote store + * Downloads segments from remote segment store + * @param overrideLocal flag to override local segment files with those in remote store. + * @throws IOException if exception occurs while reading segments from remote store. */ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOException { + syncSegmentsFromRemoteSegmentStore(overrideLocal, () -> {}); + } + + /** + * Downloads segments from remote segment store along with updating the access time of the recovery target. + * @param overrideLocal flag to override local segment files with those in remote store. + * @param onFileSync runnable that updates the access time when run. + * @throws IOException if exception occurs while reading segments from remote store. 
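A hypothetical caller of the new `syncSegmentsFromRemoteSegmentStore(boolean, Runnable)` overload documented above; `touchRecoveryTarget` stands in for whatever callback refreshes the recovery target's last-access time and is not a name taken from this diff:

```java
import java.io.IOException;

import org.opensearch.index.shard.IndexShard;

// Hypothetical usage sketch: only the two-argument
// syncSegmentsFromRemoteSegmentStore overload is from this change.
final class RecoverySyncSketch {
    static void syncForRecovery(IndexShard shard, Runnable touchRecoveryTarget) throws IOException {
        // overrideLocal=true replaces local segment files with the remote copies;
        // onFileSync runs after each file lands so the recovery is not marked idle.
        shard.syncSegmentsFromRemoteSegmentStore(true, touchRecoveryTarget);
    }
}
```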
+ */ + public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runnable onFileSync) throws IOException { assert indexSettings.isRemoteStoreEnabled(); logger.trace("Downloading segments from remote segment store"); RemoteSegmentStoreDirectory remoteDirectory = getRemoteDirectory(); @@ -4758,7 +4771,7 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOE } else { storeDirectory = store.directory(); } - copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal); + copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal, onFileSync); if (remoteSegmentMetadata != null) { final SegmentInfos infosSnapshot = store.buildSegmentInfos( @@ -4818,7 +4831,8 @@ public void syncSegmentsFromGivenRemoteSegmentStore( sourceRemoteDirectory, remoteDirectory, uploadedSegments, - overrideLocal + overrideLocal, + () -> {} ); if (segmentsNFile != null) { try ( @@ -4851,41 +4865,75 @@ private String copySegmentFiles( RemoteSegmentStoreDirectory sourceRemoteDirectory, RemoteSegmentStoreDirectory targetRemoteDirectory, Map uploadedSegments, - boolean overrideLocal + boolean overrideLocal, + final Runnable onFileSync ) throws IOException { - List downloadedSegments = new ArrayList<>(); - List skippedSegments = new ArrayList<>(); + Set toDownloadSegments = new HashSet<>(); + Set skippedSegments = new HashSet<>(); String segmentNFile = null; + try { - Set localSegmentFiles = Sets.newHashSet(storeDirectory.listAll()); if (overrideLocal) { - for (String file : localSegmentFiles) { + for (String file : storeDirectory.listAll()) { storeDirectory.deleteFile(file); } } + for (String file : uploadedSegments.keySet()) { long checksum = Long.parseLong(uploadedSegments.get(file).getChecksum()); if (overrideLocal || localDirectoryContains(storeDirectory, file, checksum) == false) { - storeDirectory.copyFrom(sourceRemoteDirectory, file, file, IOContext.DEFAULT); - downloadedSegments.add(file); + toDownloadSegments.add(file); } else { skippedSegments.add(file); } - if (targetRemoteDirectory != null) { - targetRemoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); - } + if (file.startsWith(IndexFileNames.SEGMENTS)) { assert segmentNFile == null : "There should be only one SegmentInfosSnapshot file"; segmentNFile = file; } } + + if (toDownloadSegments.isEmpty() == false) { + try { + downloadSegments(storeDirectory, sourceRemoteDirectory, targetRemoteDirectory, toDownloadSegments, onFileSync); + } catch (Exception e) { + throw new IOException("Error occurred when downloading segments from remote store", e); + } + } } finally { - logger.trace("Downloaded segments here: {}", downloadedSegments); + logger.trace("Downloaded segments here: {}", toDownloadSegments); logger.trace("Skipped download for segments here: {}", skippedSegments); } + return segmentNFile; } + private void downloadSegments( + Directory storeDirectory, + RemoteSegmentStoreDirectory sourceRemoteDirectory, + RemoteSegmentStoreDirectory targetRemoteDirectory, + Set toDownloadSegments, + final Runnable onFileSync + ) { + final PlainActionFuture completionListener = PlainActionFuture.newFuture(); + final GroupedActionListener batchDownloadListener = new GroupedActionListener<>( + ActionListener.map(completionListener, v -> null), + toDownloadSegments.size() + ); + + final ActionListener segmentsDownloadListener = ActionListener.map(batchDownloadListener, fileName -> { + onFileSync.run(); + if (targetRemoteDirectory != null) { + 
targetRemoteDirectory.copyFrom(storeDirectory, fileName, fileName, IOContext.DEFAULT); + } + return null; + }); + + final Path indexPath = store.shardPath() == null ? null : store.shardPath().resolveIndex(); + toDownloadSegments.forEach(file -> { sourceRemoteDirectory.copyTo(file, storeDirectory, indexPath, segmentsDownloadListener); }); + completionListener.actionGet(); + } + private boolean localDirectoryContains(Directory localDirectory, String file, long checksum) { try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { if (checksum == CodecUtil.retrieveChecksum(indexInput)) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java index 8953ef38da51b..f1abea81a6511 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java @@ -37,6 +37,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,6 +45,7 @@ import java.io.IOException; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; /** * Tracks indexing statistics @@ -59,6 +61,89 @@ public class IndexingStats implements Writeable, ToXContentFragment { */ public static class Stats implements Writeable, ToXContentFragment { + /** + * Tracks item level rest category class codes during indexing + * + * @opensearch.internal + */ + public static class DocStatusStats implements Writeable, ToXContentFragment { + + final AtomicLong[] docStatusCounter; + + public DocStatusStats() { + docStatusCounter = new AtomicLong[5]; + for (int i = 0; i < docStatusCounter.length; ++i) { + docStatusCounter[i] = new AtomicLong(0); + } + } + + public DocStatusStats(StreamInput in) throws IOException { + docStatusCounter = in.readArray(i -> new AtomicLong(i.readLong()), AtomicLong[]::new); + + assert docStatusCounter.length == 5 : "Length of incoming array should be 5! 
Got " + docStatusCounter.length; + } + + /** + * Increment counter for status + * + * @param status {@link RestStatus} + */ + public void inc(final RestStatus status) { + add(status, 1L); + } + + /** + * Increment counter for status by count + * + * @param status {@link RestStatus} + * @param delta The value to add + */ + void add(final RestStatus status, final long delta) { + docStatusCounter[status.getStatusFamilyCode() - 1].addAndGet(delta); + } + + /** + * Accumulate stats from the passed Object + * + * @param stats Instance storing {@link DocStatusStats} + */ + public void add(final DocStatusStats stats) { + if (null == stats) { + return; + } + + for (int i = 0; i < docStatusCounter.length; ++i) { + docStatusCounter[i].addAndGet(stats.docStatusCounter[i].longValue()); + } + } + + public AtomicLong[] getDocStatusCounter() { + return docStatusCounter; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.DOC_STATUS); + + for (int i = 0; i < docStatusCounter.length; ++i) { + long value = docStatusCounter[i].longValue(); + + if (value > 0) { + String key = i + 1 + "xx"; + builder.field(key, value); + } + } + + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeArray((o, v) -> o.writeLong(v.longValue()), docStatusCounter); + } + + } + private long indexCount; private long indexTimeInMillis; private long indexCurrent; @@ -69,8 +154,11 @@ public static class Stats implements Writeable, ToXContentFragment { private long noopUpdateCount; private long throttleTimeInMillis; private boolean isThrottled; + private final DocStatusStats docStatusStats; - Stats() {} + Stats() { + docStatusStats = new DocStatusStats(); + } public Stats(StreamInput in) throws IOException { indexCount = in.readVLong(); @@ -83,6 +171,12 @@ public Stats(StreamInput in) throws IOException { noopUpdateCount = in.readVLong(); isThrottled = in.readBoolean(); throttleTimeInMillis = in.readLong(); + + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + docStatusStats = in.readOptionalWriteable(DocStatusStats::new); + } else { + docStatusStats = null; + } } public Stats( @@ -95,7 +189,8 @@ public Stats( long deleteCurrent, long noopUpdateCount, boolean isThrottled, - long throttleTimeInMillis + long throttleTimeInMillis, + DocStatusStats docStatusStats ) { this.indexCount = indexCount; this.indexTimeInMillis = indexTimeInMillis; @@ -107,6 +202,7 @@ public Stats( this.noopUpdateCount = noopUpdateCount; this.isThrottled = isThrottled; this.throttleTimeInMillis = throttleTimeInMillis; + this.docStatusStats = docStatusStats; } public void add(Stats stats) { @@ -121,8 +217,10 @@ public void add(Stats stats) { noopUpdateCount += stats.noopUpdateCount; throttleTimeInMillis += stats.throttleTimeInMillis; - if (isThrottled != stats.isThrottled) { - isThrottled = true; // When combining if one is throttled set result to throttled. + isThrottled |= stats.isThrottled; // When combining if one is throttled set result to throttled. 
+ + if (getDocStatusStats() != null) { + getDocStatusStats().add(stats.getDocStatusStats()); } } @@ -193,6 +291,10 @@ public long getNoopUpdateCount() { return noopUpdateCount; } + public DocStatusStats getDocStatusStats() { + return docStatusStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(indexCount); @@ -206,6 +308,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isThrottled); out.writeLong(throttleTimeInMillis); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(docStatusStats); + } } @Override @@ -223,8 +328,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.IS_THROTTLED, isThrottled); builder.humanReadableField(Fields.THROTTLED_TIME_IN_MILLIS, Fields.THROTTLED_TIME, getThrottleTime()); + + if (getDocStatusStats() != null) { + getDocStatusStats().toXContent(builder, params); + } + return builder; } + } private final Stats totalStats; @@ -279,7 +390,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par * * @opensearch.internal */ - static final class Fields { + private static final class Fields { static final String INDEXING = "indexing"; static final String INDEX_TOTAL = "index_total"; static final String INDEX_TIME = "index_time"; @@ -294,6 +405,7 @@ static final class Fields { static final String IS_THROTTLED = "is_throttled"; static final String THROTTLED_TIME_IN_MILLIS = "throttle_time_in_millis"; static final String THROTTLED_TIME = "throttle_time"; + static final String DOC_STATUS = "doc_status"; } @Override @@ -303,4 +415,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } } + } diff --git a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java index d7e15dd3e40f5..55b65bb4be6d8 100644 --- a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java @@ -154,7 +154,8 @@ IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) { deleteCurrent.count(), noopUpdates.count(), isThrottled, - TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis) + TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis), + new IndexingStats.Stats.DocStatusStats() ); } } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 4e035af9bbc5e..695c01367171a 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -127,6 +127,7 @@ protected void runAfterRefreshExactlyOnce(boolean didRefresh) { segmentTracker.updateLocalRefreshTimeAndSeqNo(); try { if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { + logger.debug("primaryTerm update from={} to={}", primaryTerm, indexShard.getOperationPrimaryTerm()); this.primaryTerm = indexShard.getOperationPrimaryTerm(); this.remoteDirectory.init(); } @@ -150,8 +151,6 @@ protected void runAfterRefreshExactlyOnce(boolean didRefresh) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { boolean successful; - // The third condition exists for uploading the zero state segments where the refresh has not changed the reader reference, but it - // is 
important to upload the zero state segments so that the restore does not break. if (shouldSync(didRefresh)) { successful = syncSegments(); } else { @@ -161,6 +160,8 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { } private boolean shouldSync(boolean didRefresh) { + // The third condition exists for uploading the zero state segments where the refresh has not changed the reader reference, but it + // is important to upload the zero state segments so that the restore does not break. return this.primaryTerm != indexShard.getOperationPrimaryTerm() || didRefresh || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty(); @@ -168,7 +169,7 @@ private boolean shouldSync(boolean didRefresh) { private boolean syncSegments() { if (indexShard.getReplicationTracker().isPrimaryMode() == false || indexShard.state() == IndexShardState.CLOSED) { - logger.trace( + logger.debug( "Skipped syncing segments with primaryMode={} indexShardState={}", indexShard.getReplicationTracker().isPrimaryMode(), indexShard.state() @@ -178,10 +179,7 @@ private boolean syncSegments() { // primaryMode to true. Due to this, the refresh that is triggered post replay of translog will not go through // if following condition does not exist. The segments created as part of translog replay will not be present // in the remote store. - if (indexShard.state() == IndexShardState.STARTED && indexShard.getEngine() instanceof InternalEngine) { - return false; - } - return true; + return indexShard.state() != IndexShardState.STARTED || !(indexShard.getEngine() instanceof InternalEngine); } ReplicationCheckpoint checkpoint = indexShard.getLatestReplicationCheckpoint(); beforeSegmentsSync(); @@ -217,8 +215,10 @@ private boolean syncSegments() { @Override public void onResponse(Void unused) { try { + logger.debug("New segments upload successful"); // Start metadata file upload uploadMetadata(localSegmentsPostRefresh, segmentInfos, checkpoint); + logger.debug("Metadata upload successful"); clearStaleFilesFromLocalSegmentChecksumMap(localSegmentsPostRefresh); onSuccessfulSegmentsSync( refreshTimeMs, @@ -262,6 +262,7 @@ public void onFailure(Exception e) { updateFinalStatusInSegmentTracker(successful.get(), bytesBeforeUpload, startTimeInNS); // If there are failures in uploading segments, then we should retry as search idle can lead to // refresh not occurring until write happens. 
+ logger.debug("syncSegments runStatus={}", successful.get()); return successful.get(); } @@ -300,6 +301,7 @@ private void onSuccessfulSegmentsSync( indexShard.getEngine().translogManager().setMinSeqNoToKeep(lastRefreshedCheckpoint + 1); // Publishing the new checkpoint which is used for remote store + segrep indexes checkpointPublisher.publish(indexShard, checkpoint); + logger.debug("onSuccessfulSegmentsSync lastRefreshedCheckpoint={} checkpoint={}", lastRefreshedCheckpoint, checkpoint); } /** @@ -352,10 +354,12 @@ void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos se private void uploadNewSegments(Collection localSegmentsPostRefresh, ActionListener listener) { Collection filteredFiles = localSegmentsPostRefresh.stream().filter(file -> !skipUpload(file)).collect(Collectors.toList()); if (filteredFiles.size() == 0) { + logger.debug("No new segments to upload in uploadNewSegments"); listener.onResponse(null); return; } + logger.debug("Effective new segments files to upload {}", filteredFiles); ActionListener> mappedListener = ActionListener.map(listener, resp -> null); GroupedActionListener batchUploadListener = new GroupedActionListener<>(mappedListener, filteredFiles.size()); diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 6d675b709e05b..c0211e1257c8e 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -399,7 +399,7 @@ void recoverFromSnapshotAndRemoteStore( RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( remoteStoreRepository, indexUUID, - String.valueOf(shardId.id()) + shardId ); indexShard.syncSegmentsFromGivenRemoteSegmentStore(true, sourceRemoteDirectory, primaryTerm, commitGeneration); final Store store = indexShard.store(); @@ -536,7 +536,6 @@ private void recoverFromRemoteStore(IndexShard indexShard) throws IndexShardReco try { // Download segments from remote segment store indexShard.syncSegmentsFromRemoteSegmentStore(true); - indexShard.syncTranslogFilesFromRemoteTranslog(); // On index creation, the only segment file that is created is segments_N. 
We can safely discard this file diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 266552cea93d9..b23d2d7d0a3f8 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -24,9 +24,12 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.Version; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.io.VersionedCodecStreamWrapper; +import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; @@ -40,6 +43,8 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -97,7 +102,9 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement RemoteSegmentMetadata.METADATA_CODEC ); - private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + private static final Logger staticLogger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + + private final Logger logger; /** * AtomicBoolean that ensures only one staleCommitDeletion activity is scheduled at a time. 
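The `staticLogger`/`logger` split in RemoteSegmentStoreDirectory below follows the pattern this change also applies to InternalTranslogManager: static call sites keep a class-level logger, while instance methods get a shard-scoped logger whose messages carry the shard as a prefix. A minimal sketch, assuming `Loggers.getLogger(Class, ShardId)` behaves as it is used in this diff:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.opensearch.common.logging.Loggers;
import org.opensearch.core.index.shard.ShardId;

// Sketch of the two-logger pattern: the static logger serves static helpers
// (e.g. UploadedSegmentMetadata.fromString), the instance logger prefixes
// every message with the owning shard, e.g. "[index-name][0] ...".
class ShardScopedComponent {
    private static final Logger staticLogger = LogManager.getLogger(ShardScopedComponent.class);
    private final Logger logger;

    ShardScopedComponent(ShardId shardId) {
        this.logger = Loggers.getLogger(getClass(), shardId);
    }

    void doWork() {
        logger.debug("runs with a per-shard prefix");           // instance context
    }

    static void helper() {
        staticLogger.debug("no shard context available here");  // class-level only
    }
}
```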
@@ -111,13 +118,15 @@ public RemoteSegmentStoreDirectory( RemoteDirectory remoteDataDirectory, RemoteDirectory remoteMetadataDirectory, RemoteStoreLockManager mdLockManager, - ThreadPool threadPool + ThreadPool threadPool, + ShardId shardId ) throws IOException { super(remoteDataDirectory); this.remoteDataDirectory = remoteDataDirectory; this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; this.threadPool = threadPool; + this.logger = Loggers.getLogger(getClass(), shardId); init(); } @@ -130,12 +139,14 @@ public RemoteSegmentStoreDirectory( * @throws IOException if there were any failures in reading the metadata file */ public RemoteSegmentMetadata init() throws IOException { + logger.debug("Start initialisation of remote segment metadata"); RemoteSegmentMetadata remoteSegmentMetadata = readLatestMetadataFile(); if (remoteSegmentMetadata != null) { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); } else { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(); } + logger.debug("Initialisation of remote segment metadata completed"); return remoteSegmentMetadata; } @@ -248,7 +259,7 @@ public static UploadedSegmentMetadata fromString(String uploadedFilename) { String[] values = uploadedFilename.split(SEPARATOR); UploadedSegmentMetadata metadata = new UploadedSegmentMetadata(values[0], values[1], values[2], Long.parseLong(values[3])); if (values.length < 5) { - logger.error("Lucene version is missing for UploadedSegmentMetadata: " + uploadedFilename); + staticLogger.error("Lucene version is missing for UploadedSegmentMetadata: " + uploadedFilename); } metadata.setWrittenByMajor(Integer.parseInt(values[4])); @@ -434,6 +445,41 @@ public void copyFrom(Directory from, String src, IOContext context, ActionListen } } + /** + * Copies an existing {@code source} file from this directory to a non-existent file (also + * named {@code source}) in either {@code destinationDirectory} or {@code destinationPath}. + * If the blob container backing this directory supports multipart downloads, the {@code source} + * file will be downloaded (potentially in multiple concurrent parts) directly to + * {@code destinationPath}. This method will return immediately and {@code fileCompletionListener} + * will be notified upon completion. + *

+ * If multipart downloads are not supported, then {@code source} file will be copied to a file named + * {@code source} in a single part to {@code destinationDirectory}. The download will happen on the + * calling thread and {@code fileCompletionListener} will be notified synchronously before this + * method returns. + * + * @param source The source file name + * @param destinationDirectory The destination directory (if multipart is not supported) + * @param destinationPath The destination path (if multipart is supported) + * @param fileCompletionListener The listener to notify of completion + */ + public void copyTo(String source, Directory destinationDirectory, Path destinationPath, ActionListener fileCompletionListener) { + final String blobName = getExistingRemoteFilename(source); + if (destinationPath != null && remoteDataDirectory.getBlobContainer() instanceof AsyncMultiStreamBlobContainer) { + final AsyncMultiStreamBlobContainer blobContainer = (AsyncMultiStreamBlobContainer) remoteDataDirectory.getBlobContainer(); + final Path destinationFilePath = destinationPath.resolve(source); + blobContainer.asyncBlobDownload(blobName, destinationFilePath, threadPool, fileCompletionListener); + } else { + // Fallback to older mechanism of downloading the file + try { + destinationDirectory.copyFrom(this, source, source, IOContext.DEFAULT); + fileCompletionListener.onResponse(source); + } catch (IOException e) { + fileCompletionListener.onFailure(e); + } + } + } + /** * This acquires a lock on a given commit by creating a lock file in lock directory using {@code FileLockInfo} * @@ -641,7 +687,7 @@ private Map getSegmentToLuceneVersion(Collection segmen */ private void tryAndDeleteLocalFile(String filename, Directory directory) { try { - logger.trace("Deleting file: " + filename); + logger.debug("Deleting file: " + filename); directory.deleteFile(filename); } catch (NoSuchFileException | FileNotFoundException e) { logger.trace("Exception while deleting. 
Missing file : " + filename, e); @@ -691,7 +737,7 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException Integer.MAX_VALUE ); if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) { - logger.trace( + logger.debug( "Number of commits in remote segment store={}, lastNMetadataFilesToKeep={}", sortedMetadataFileList.size(), lastNMetadataFilesToKeep @@ -719,6 +765,11 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException }).collect(Collectors.toList()); sortedMetadataFileList.removeAll(metadataFilesToBeDeleted); + logger.debug( + "metadataFilesEligibleToDelete={} metadataFilesToBeDeleted={}", + metadataFilesEligibleToDelete, + metadataFilesEligibleToDelete + ); Map activeSegmentFilesMetadataMap = new HashMap<>(); Set activeSegmentRemoteFilenames = new HashSet<>(); @@ -736,9 +787,11 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException .map(metadata -> metadata.uploadedFilename) .collect(Collectors.toSet()); AtomicBoolean deletionSuccessful = new AtomicBoolean(true); + List nonActiveDeletedSegmentFiles = new ArrayList<>(); staleSegmentRemoteFilenames.stream().filter(file -> !activeSegmentRemoteFilenames.contains(file)).forEach(file -> { try { remoteDataDirectory.deleteFile(file); + nonActiveDeletedSegmentFiles.add(file); if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) { segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file)); } @@ -753,8 +806,9 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException ); } }); + logger.debug("nonActiveDeletedSegmentFiles={}", nonActiveDeletedSegmentFiles); if (deletionSuccessful.get()) { - logger.trace("Deleting stale metadata file {} from remote segment store", metadataFile); + logger.debug("Deleting stale metadata file {} from remote segment store", metadataFile); remoteMetadataDirectory.deleteFile(metadataFile); } } @@ -805,7 +859,7 @@ private boolean deleteIfEmpty() throws IOException { 1 ); if (metadataFiles.size() != 0) { - logger.info("Remote directory still has files , not deleting the path"); + logger.info("Remote directory still has files, not deleting the path"); return false; } @@ -821,6 +875,7 @@ private boolean deleteIfEmpty() throws IOException { return true; } + @Override public void close() throws IOException { deleteStaleSegmentsAsync(0, ActionListener.wrap(r -> deleteIfEmpty(), e -> logger.error("Failed to cleanup remote directory"))); } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 31b49f6813ad2..490b07e441702 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -10,6 +10,7 @@ import org.apache.lucene.store.Directory; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; @@ -45,17 +46,15 @@ public RemoteSegmentStoreDirectoryFactory(Supplier reposito public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { String repositoryName = indexSettings.getRemoteStoreRepository(); String indexUUID = indexSettings.getIndex().getUUID(); - String shardId = 
String.valueOf(path.getShardId().getId()); - - return newDirectory(repositoryName, indexUUID, shardId); + return newDirectory(repositoryName, indexUUID, path.getShardId()); } - public Directory newDirectory(String repositoryName, String indexUUID, String shardId) throws IOException { + public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId) throws IOException { try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository); BlobPath commonBlobPath = blobStoreRepository.basePath(); - commonBlobPath = commonBlobPath.add(indexUUID).add(shardId).add(SEGMENTS); + commonBlobPath = commonBlobPath.add(indexUUID).add(String.valueOf(shardId.id())).add(SEGMENTS); RemoteDirectory dataDirectory = new RemoteDirectory( blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("data")), @@ -69,10 +68,10 @@ public Directory newDirectory(String repositoryName, String indexUUID, String sh repositoriesService.get(), repositoryName, indexUUID, - shardId + String.valueOf(shardId.id()) ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index b3ea2cdd02e21..285241ba89996 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -66,6 +66,7 @@ import org.apache.lucene.util.Version; import org.opensearch.ExceptionsHelper; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.Lucene; @@ -92,6 +93,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.AbstractIndexShardComponent; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardPath; import org.opensearch.index.translog.Translog; import java.io.Closeable; @@ -179,6 +181,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref private final ReentrantReadWriteLock metadataLock = new ReentrantReadWriteLock(); private final ShardLock shardLock; private final OnClose onClose; + private final ShardPath shardPath; // used to ref count files when a new Reader is opened for PIT/Scroll queries // prevents segment files deletion until the PIT/Scroll expires or is discarded @@ -192,10 +195,17 @@ protected void closeInternal() { }; public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardLock shardLock) { - this(shardId, indexSettings, directory, shardLock, OnClose.EMPTY); + this(shardId, indexSettings, directory, shardLock, OnClose.EMPTY, null); } - public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardLock shardLock, OnClose onClose) { + public Store( + ShardId shardId, + IndexSettings indexSettings, + Directory directory, + ShardLock shardLock, + OnClose onClose, + 
ShardPath shardPath + ) { super(shardId, indexSettings); final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING); logger.debug("store stats are refreshed with refresh_interval [{}]", refreshInterval); @@ -203,6 +213,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", shardId)); this.shardLock = shardLock; this.onClose = onClose; + this.shardPath = shardPath; assert onClose != null; assert shardLock != null; assert shardLock.getShardId().equals(shardId); @@ -213,6 +224,11 @@ public Directory directory() { return directory; } + @InternalApi + public ShardPath shardPath() { + return shardPath; + } + /** * Returns the last committed segments info for this store * diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index 6b51cb17dcc41..4bc9a711894b7 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -8,10 +8,10 @@ package org.opensearch.index.translog; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.shard.ShardId; @@ -43,7 +43,7 @@ public class InternalTranslogManager implements TranslogManager, Closeable { private final AtomicBoolean pendingTranslogRecovery = new AtomicBoolean(false); private final TranslogEventListener translogEventListener; private final Supplier localCheckpointTrackerSupplier; - private static final Logger logger = LogManager.getLogger(InternalTranslogManager.class); + private final Logger logger; public InternalTranslogManager( TranslogConfig translogConfig, @@ -76,6 +76,7 @@ public InternalTranslogManager( assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it"; // don't allow commits until we are done with recovering pendingTranslogRecovery.set(true); + this.logger = Loggers.getLogger(getClass(), shardId); } /** diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index c488127857ed5..004d1bfdca36d 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.common.SetOnce; import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; @@ -182,11 +181,12 @@ static void download(TranslogTransferManager translogTransferManager, Path locat ex = e; } } + logger.debug("Exhausted all download retries during translog/checkpoint file download"); throw ex; } static private void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { - 
logger.trace("Downloading translog files from remote"); + logger.debug("Downloading translog files from remote"); RemoteTranslogTransferTracker statsTracker = translogTransferManager.getRemoteTranslogTransferTracker(); long prevDownloadBytesSucceeded = statsTracker.getDownloadBytesSucceeded(); long prevDownloadTimeInMillis = statsTracker.getTotalDownloadTimeInMillis(); @@ -206,6 +206,11 @@ static private void downloadOnce(TranslogTransferManager translogTransferManager String generation = Long.toString(i); translogTransferManager.downloadTranslog(generationToPrimaryTermMapper.get(generation), generation, location); } + logger.info( + "Downloaded translog and checkpoint files from={} to={}", + translogMetadata.getMinTranslogGeneration(), + translogMetadata.getGeneration() + ); statsTracker.recordDownloadStats(prevDownloadBytesSucceeded, prevDownloadTimeInMillis); @@ -216,7 +221,7 @@ static private void downloadOnce(TranslogTransferManager translogTransferManager location.resolve(Translog.CHECKPOINT_FILE_NAME) ); } - logger.trace("Downloaded translog files from remote"); + logger.debug("downloadOnce execution completed"); } public static TranslogTransferManager buildTranslogTransferManager( @@ -237,7 +242,7 @@ public static TranslogTransferManager buildTranslogTransferManager( @Override public boolean ensureSynced(Location location) throws IOException { - try (ReleasableLock ignored = writeLock.acquire()) { + try { assert location.generation <= current.getGeneration(); if (location.generation == current.getGeneration()) { ensureOpen(); @@ -270,6 +275,11 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc logger.trace("Creating new writer for gen: [{}]", current.getGeneration() + 1); current = createWriter(current.getGeneration() + 1); } + assert writeLock.isHeldByCurrentThread() : "Write lock must be held before we acquire the read lock"; + // Here we are downgrading the write lock by acquiring the read lock and releasing the write lock + // This ensures that other threads can still acquire the read locks while also protecting the + // readers and writer to not be mutated any further. + readLock.acquire(); } catch (final Exception e) { tragedy.setTragicException(e); closeOnTragicEvent(e); @@ -278,7 +288,10 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc } else if (generation < current.getGeneration()) { return false; } + } + assert readLock.isHeldByCurrentThread() == true; + try (Releasable ignored = readLock; Releasable ignoredGenLock = deletionPolicy.acquireTranslogGen(getMinFileGeneration())) { // Do we need remote writes in sync fashion ? // If we don't , we should swallow FileAlreadyExistsException while writing to remote store // and also verify for same during primary-primary relocation @@ -303,7 +316,7 @@ private boolean upload(Long primaryTerm, Long generation) throws IOException { // primary, the engine is reset to InternalEngine which also initialises the RemoteFsTranslog which in turns // downloads all the translogs from remote store and does a flush before the relocation finishes. 
if (primaryModeSupplier.getAsBoolean() == false) { - logger.trace("skipped uploading translog for {} {}", primaryTerm, generation); + logger.debug("skipped uploading translog for {} {}", primaryTerm, generation); // NO-OP return true; } @@ -317,10 +330,9 @@ private boolean upload(Long primaryTerm, Long generation) throws IOException { Translog::getCommitCheckpointFileName ).build() ) { - Releasable transferReleasable = Releasables.wrap(deletionPolicy.acquireTranslogGen(getMinFileGeneration())); return translogTransferManager.transferSnapshot( transferSnapshotProvider, - new RemoteFsTranslogTransferListener(transferReleasable, generation, primaryTerm) + new RemoteFsTranslogTransferListener(generation, primaryTerm) ); } @@ -499,16 +511,17 @@ protected void onDelete() { translogTransferManager.delete(); } + // Visible for testing + boolean isRemoteGenerationDeletionPermitsAvailable() { + return remoteGenerationDeletionPermits.availablePermits() == REMOTE_DELETION_PERMITS; + } + /** * TranslogTransferListener implementation for RemoteFsTranslog * * @opensearch.internal */ private class RemoteFsTranslogTransferListener implements TranslogTransferListener { - /** - * Releasable instance for the translog - */ - private final Releasable transferReleasable; /** * Generation for the translog @@ -520,16 +533,13 @@ private class RemoteFsTranslogTransferListener implements TranslogTransferListen */ private final Long primaryTerm; - RemoteFsTranslogTransferListener(Releasable transferReleasable, Long generation, Long primaryTerm) { - this.transferReleasable = transferReleasable; + RemoteFsTranslogTransferListener(Long generation, Long primaryTerm) { this.generation = generation; this.primaryTerm = primaryTerm; } @Override public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException { - transferReleasable.close(); - closeFilesIfNoPendingRetentionLocks(); maxRemoteTranslogGenerationUploaded = generation; minRemoteGenReferenced = getMinFileGeneration(); logger.trace("uploaded translog for {} {} ", primaryTerm, generation); @@ -537,8 +547,6 @@ public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOExcepti @Override public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) throws IOException { - transferReleasable.close(); - closeFilesIfNoPendingRetentionLocks(); if (ex instanceof IOException) { throw (IOException) ex; } else { diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index fd4936603671c..d7e50cdcf32e4 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -227,7 +227,7 @@ private void captureStatsOnUploadFailure() { } public boolean downloadTranslog(String primaryTerm, String generation, Path location) throws IOException { - logger.info( + logger.trace( "Downloading translog files with: Primary Term = {}, Generation = {}, Location = {}", primaryTerm, generation, @@ -289,6 +289,7 @@ public TranslogTransferMetadata readMetadata() throws IOException { exceptionSetOnce.set(e); } finally { remoteTranslogTransferTracker.addDownloadTimeInMillis((System.nanoTime() - downloadStartTime) / 1_000_000L); + logger.debug("translogMetadataDownloadStatus={}", downloadStatus); if (downloadStatus) { remoteTranslogTransferTracker.addDownloadBytesSucceeded(bytesToRead); } diff 
--git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 2ed3cb8d9e8ea..a72142e65c5e8 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -47,6 +47,7 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.action.search.SearchType; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; @@ -132,6 +133,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; @@ -284,6 +286,18 @@ public class IndicesService extends AbstractLifecycleComponent Property.Dynamic ); + /** + * This setting is used to restrict creation or update of an index where the `index.translog.durability` index setting + * is set to ASYNC, if enabled. If disabled, any durability mode can be used and switched at any later time from + * one to another. + */ + public static final Setting CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING = Setting.boolSetting( + "cluster.remote_store.index.restrict.async-durability", + false, + Property.NodeScope, + Property.Final + ); + /** * The node's settings. 
*/ @@ -333,6 +347,8 @@ public class IndicesService extends AbstractLifecycleComponent private volatile TimeValue clusterRemoteTranslogBufferInterval; private final FileCacheCleaner fileCacheCleaner; + private final SearchRequestStats searchRequestStats; + @Override protected void doStart() { // Start thread that will manage cleaning the field data cache periodically @@ -363,6 +379,7 @@ public IndicesService( IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, Supplier repositoriesServiceSupplier, FileCacheCleaner fileCacheCleaner, + SearchRequestStats searchRequestStats, @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) { this.settings = settings; @@ -453,6 +470,7 @@ protected void closeInternal() { clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.remoteDirectoryFactory = remoteDirectoryFactory; this.translogFactorySupplier = getTranslogFactorySupplier(repositoriesServiceSupplier, threadPool, remoteStoreStatsTrackerFactory); + this.searchRequestStats = searchRequestStats; this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); @@ -576,7 +594,7 @@ public NodeIndicesStats stats(CommonStatsFlags flags) { } } - return new NodeIndicesStats(commonStats, statsByShard(this, flags)); + return new NodeIndicesStats(commonStats, statsByShard(this, flags), searchRequestStats); } Map> statsByShard(final IndicesService indicesService, final CommonStatsFlags flags) { @@ -1041,6 +1059,15 @@ public IndicesQueryCache getIndicesQueryCache() { return indicesQueryCache; } + /** + * Accumulate stats from the passed Object + * + * @param stats Instance storing {@link DocStatusStats} + */ + public void addDocStatusStats(final DocStatusStats stats) { + oldShardsStats.indexingStats.getTotal().getDocStatusStats().add(stats); + } + /** * Statistics for old shards * diff --git a/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java b/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java index cc3d8193dfa6b..8a7aaba2726f4 100644 --- a/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java +++ b/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.common.Nullable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -71,7 +72,6 @@ * @opensearch.internal */ public class NodeIndicesStats implements Writeable, ToXContentFragment { - private CommonStats stats; private Map> statsByShard; @@ -92,7 +92,7 @@ public NodeIndicesStats(StreamInput in) throws IOException { } } - public NodeIndicesStats(CommonStats oldStats, Map> statsByShard) { + public NodeIndicesStats(CommonStats oldStats, Map> statsByShard, SearchRequestStats searchRequestStats) { // this.stats = stats; this.statsByShard = statsByShard; @@ -105,6 +105,9 @@ public NodeIndicesStats(CommonStats oldStats, Map> } } } + if (this.stats.search != null) { + this.stats.search.setSearchRequestStats(searchRequestStats); + } } @Nullable diff --git 
a/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java index a915046c381fc..ac6b2e6b77d18 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java @@ -161,6 +161,7 @@ && isTargetSameHistory() }, shardId + " removing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger); deleteRetentionLeaseStep.whenComplete(ignored -> { + logger.debug("deleteRetentionLeaseStep completed"); assert Transports.assertNotTransportThread(this + "[phase1]"); phase1(wrappedSafeCommit.get(), startingSeqNo, () -> estimateNumOps, sendFileStep, false); }, onFailure); @@ -172,12 +173,14 @@ && isTargetSameHistory() assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo; sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); assert Transports.assertNotTransportThread(this + "[prepareTargetForTranslog]"); // For a sequence based recovery, the target can keep its local translog prepareTargetForTranslog(countNumberOfHistoryOperations(startingSeqNo), prepareEngineStep); }, onFailure); prepareEngineStep.whenComplete(prepareEngineTime -> { + logger.debug("prepareEngineStep completed"); assert Transports.assertNotTransportThread(this + "[phase2]"); /* * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index df1589b3f29b9..65c3e976d17fe 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -245,7 +245,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi indexShard.prepareForIndexRecovery(); final boolean hasRemoteSegmentStore = indexShard.indexSettings().isRemoteStoreEnabled(); if (hasRemoteSegmentStore) { - indexShard.syncSegmentsFromRemoteSegmentStore(false); + indexShard.syncSegmentsFromRemoteSegmentStore(false, recoveryTarget::setLastAccessTime); } final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot(); @@ -264,7 +264,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi actionName = PeerRecoverySourceService.Actions.START_RECOVERY; } catch (final Exception e) { // this will be logged as warning later on... 
- logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); + logger.debug("unexpected error while preparing shard for peer recovery, failing recovery", e); onGoingRecoveries.fail( recoveryId, new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), @@ -272,12 +272,12 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi ); return; } - logger.trace("{} starting recovery from {}", startRequest.shardId(), startRequest.sourceNode()); + logger.debug("{} starting recovery from {}", startRequest.shardId(), startRequest.sourceNode()); } else { startRequest = preExistingRequest; requestToSend = new ReestablishRecoveryRequest(recoveryId, startRequest.shardId(), startRequest.targetAllocationId()); actionName = PeerRecoverySourceService.Actions.REESTABLISH_RECOVERY; - logger.trace("{} reestablishing recovery from {}", startRequest.shardId(), startRequest.sourceNode()); + logger.debug("{} reestablishing recovery from {}", startRequest.shardId(), startRequest.sourceNode()); } } transportService.sendRequest( diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 6d7972d79a96c..7996c48b2b04b 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -201,9 +201,13 @@ protected void finalizeStepAndCompleteFuture( final StepListener finalizeStep = new StepListener<>(); // Recovery target can trim all operations >= startingSeqNo as we have sent all these operations in the phase 2 final long trimAboveSeqNo = startingSeqNo - 1; - sendSnapshotStep.whenComplete(r -> finalizeRecovery(r.targetLocalCheckpoint, trimAboveSeqNo, finalizeStep), onFailure); + sendSnapshotStep.whenComplete(r -> { + logger.debug("sendSnapshotStep completed"); + finalizeRecovery(r.targetLocalCheckpoint, trimAboveSeqNo, finalizeStep); + }, onFailure); finalizeStep.whenComplete(r -> { + logger.debug("finalizeStep completed"); final long phase1ThrottlingWaitTime = 0L; // TODO: return the actual throttle time final SendSnapshotResult sendSnapshotResult = sendSnapshotStep.result(); final SendFileResult sendFileResult = sendFileStep.result(); @@ -233,7 +237,10 @@ protected void onSendFileStepComplete( GatedCloseable wrappedSafeCommit, Releasable releaseStore ) { - sendFileStep.whenComplete(r -> IOUtils.close(wrappedSafeCommit, releaseStore), e -> { + sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); + IOUtils.close(wrappedSafeCommit, releaseStore); + }, e -> { try { IOUtils.close(wrappedSafeCommit, releaseStore); } catch (final IOException ex) { @@ -445,16 +452,22 @@ void phase1( sendFileInfoStep ); - sendFileInfoStep.whenComplete( - r -> sendFiles(store, phase1Files.toArray(new StoreFileMetadata[0]), translogOps, sendFilesStep), - listener::onFailure - ); + sendFileInfoStep.whenComplete(r -> { + logger.debug("sendFileInfoStep completed"); + sendFiles(store, phase1Files.toArray(new StoreFileMetadata[0]), translogOps, sendFilesStep); + }, listener::onFailure); // When doing peer recovery of remote store enabled replica, retention leases are not required. 
if (skipCreateRetentionLeaseStep) { - sendFilesStep.whenComplete(r -> createRetentionLeaseStep.onResponse(null), listener::onFailure); + sendFilesStep.whenComplete(r -> { + logger.debug("sendFilesStep completed"); + createRetentionLeaseStep.onResponse(null); + }, listener::onFailure); } else { - sendFilesStep.whenComplete(r -> createRetentionLease(startingSeqNo, createRetentionLeaseStep), listener::onFailure); + sendFilesStep.whenComplete(r -> { + logger.debug("sendFilesStep completed"); + createRetentionLease(startingSeqNo, createRetentionLeaseStep); + }, listener::onFailure); } createRetentionLeaseStep.whenComplete(retentionLease -> { @@ -471,6 +484,7 @@ void phase1( final long totalSize = totalSizeInBytes; final long existingTotalSize = existingTotalSizeInBytes; cleanFilesStep.whenComplete(r -> { + logger.debug("cleanFilesStep completed"); final TimeValue took = stopWatch.totalTime(); logger.trace("recovery [phase1]: took [{}]", took); listener.onResponse( @@ -541,7 +555,10 @@ void createRetentionLease(final long startingSeqNo, ActionListener(logger, shard.getThreadPool(), ThreadPool.Names.GENERIC, cloneRetentionLeaseStep, false) ); logger.trace("cloned primary's retention lease as [{}]", clonedLease); - cloneRetentionLeaseStep.whenComplete(rr -> listener.onResponse(clonedLease), listener::onFailure); + cloneRetentionLeaseStep.whenComplete(rr -> { + logger.debug("cloneRetentionLeaseStep completed"); + listener.onResponse(clonedLease); + }, listener::onFailure); } catch (RetentionLeaseNotFoundException e) { // it's possible that the primary has no retention lease yet if we are doing a rolling upgrade from a version before // 7.4, and in that case we just create a lease using the local checkpoint of the safe commit which we're using for @@ -554,7 +571,10 @@ void createRetentionLease(final long startingSeqNo, ActionListener(logger, shard.getThreadPool(), ThreadPool.Names.GENERIC, addRetentionLeaseStep, false) ); - addRetentionLeaseStep.whenComplete(rr -> listener.onResponse(newLease), listener::onFailure); + addRetentionLeaseStep.whenComplete(rr -> { + logger.debug("addRetentionLeaseStep completed"); + listener.onResponse(newLease); + }, listener::onFailure); logger.trace("created retention lease with estimated checkpoint of [{}]", estimatedGlobalCheckpoint); } }, shardId + " establishing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger); @@ -810,6 +830,7 @@ void finalizeRecovery(long targetLocalCheckpoint, long trimAboveSeqNo, ActionLis cancellableThreads.checkForCancel(); recoveryTarget.finalizeRecovery(globalCheckpoint, trimAboveSeqNo, finalizeListener); finalizeListener.whenComplete(r -> { + logger.debug("finalizeListenerStep completed"); RunUnderPrimaryPermit.run( () -> shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint), shardId + " updating " + request.targetAllocationId() + "'s global checkpoint", diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java index 5d6c0eb3bae05..66c7a3b48f28f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java @@ -80,12 +80,14 @@ protected void innerRecoveryToTarget(ActionListener listener, assert startingSeqNo >= 0 : "startingSeqNo must be non negative. 
got: " + startingSeqNo; sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); assert Transports.assertNotTransportThread(this + "[prepareTargetForTranslog]"); // For a sequence based recovery, the target can keep its local translog prepareTargetForTranslog(0, prepareEngineStep); }, onFailure); prepareEngineStep.whenComplete(prepareEngineTime -> { + logger.debug("prepareEngineStep completed"); assert Transports.assertNotTransportThread(this + "[phase2]"); RunUnderPrimaryPermit.run( () -> shard.initiateTracking(request.targetAllocationId()), diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index 7252fea044a02..aeb690465905f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -13,18 +13,20 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; -import org.apache.lucene.store.IOContext; import org.apache.lucene.util.Version; +import org.opensearch.action.support.GroupedActionListener; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; +import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -103,34 +105,52 @@ public void getSegmentFiles( listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); return; } - logger.trace("Downloading segments files from remote store {}", filesToFetch); + logger.debug("Downloading segment files from remote store {}", filesToFetch); RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.readLatestMetadataFile(); - List downloadedSegments = new ArrayList<>(); + List toDownloadSegments = new ArrayList<>(); Collection directoryFiles = List.of(indexShard.store().directory().listAll()); if (remoteSegmentMetadata != null) { try { indexShard.store().incRef(); indexShard.remoteStore().incRef(); final Directory storeDirectory = indexShard.store().directory(); + final ShardPath shardPath = indexShard.shardPath(); for (StoreFileMetadata fileMetadata : filesToFetch) { String file = fileMetadata.name(); assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; - storeDirectory.copyFrom(remoteDirectory, file, file, IOContext.DEFAULT); - downloadedSegments.add(fileMetadata); + toDownloadSegments.add(fileMetadata); } - logger.trace("Downloaded segments from remote store {}", downloadedSegments); + downloadSegments(storeDirectory, remoteDirectory, toDownloadSegments, shardPath, listener); + logger.debug("Downloaded segment files from remote store {}", toDownloadSegments); } finally { indexShard.store().decRef(); indexShard.remoteStore().decRef(); } } - listener.onResponse(new GetSegmentFilesResponse(downloadedSegments)); } catch (Exception e) { listener.onFailure(e); } } + 
private void downloadSegments( + Directory storeDirectory, + RemoteSegmentStoreDirectory remoteStoreDirectory, + List toDownloadSegments, + ShardPath shardPath, + ActionListener completionListener + ) { + final Path indexPath = shardPath == null ? null : shardPath.resolveIndex(); + final GroupedActionListener batchDownloadListener = new GroupedActionListener<>( + ActionListener.map(completionListener, v -> new GetSegmentFilesResponse(toDownloadSegments)), + toDownloadSegments.size() + ); + ActionListener segmentsDownloadListener = ActionListener.map(batchDownloadListener, result -> null); + toDownloadSegments.forEach( + fileMetadata -> remoteStoreDirectory.copyTo(fileMetadata.name(), storeDirectory, indexPath, segmentsDownloadListener) + ); + } + @Override public String getDescription() { return "RemoteStoreReplicationSource"; diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index c071b22ba4cba..ffc4ab86661db 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -208,7 +208,7 @@ public SegmentReplicationState getSegmentReplicationState(ShardId shardId) { * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { - logger.trace(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); + logger.debug(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); // if the shard is in any state if (replicaShard.state().equals(IndexShardState.CLOSED)) { // ignore if shard is closed @@ -224,7 +224,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe SegmentReplicationTarget ongoingReplicationTarget = onGoingReplications.getOngoingReplicationTarget(replicaShard.shardId()); if (ongoingReplicationTarget != null) { if (ongoingReplicationTarget.getCheckpoint().getPrimaryTerm() < receivedCheckpoint.getPrimaryTerm()) { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "Cancelling ongoing replication {} from old primary with primary term {}", ongoingReplicationTarget.description(), @@ -233,7 +233,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe ); ongoingReplicationTarget.cancel("Cancelling stuck target after new primary"); } else { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}", ongoingReplicationTarget.getCheckpoint() @@ -247,7 +247,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe startReplication(replicaShard, receivedCheckpoint, new SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "[shardId {}] [replication id {}] Replication complete to {}, timing data: {}", replicaShard.shardId().getId(), @@ -512,7 +512,7 @@ private void start(final long replicationId) { target.startReplication(new ActionListener<>() { @Override public void onResponse(Void o) { - logger.trace(() -> new ParameterizedMessage("Finished replicating {} 
marking as done.", target.description())); + logger.debug(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); onGoingReplications.markAsDone(replicationId); if (target.state().getIndex().recoveredFileCount() != 0 && target.state().getIndex().recoveredBytes() != 0) { completedReplications.put(target.shardId(), target); @@ -521,6 +521,7 @@ public void onResponse(Void o) { @Override public void onFailure(Exception e) { + logger.debug("Replication failed {}", target.description()); if (e instanceof OpenSearchCorruptionException) { onGoingReplications.fail(replicationId, new ReplicationFailedException("Store corruption during replication", e), true); return; diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 76f0d333db977..11b01965c4af5 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -137,7 +137,7 @@ public String executor() { @Override public void handleResponse(ReplicationResponse response) { timer.stop(); - logger.trace( + logger.debug( () -> new ParameterizedMessage( "[shardId {}] Completed publishing checkpoint [{}], timing: {}", indexShard.shardId().getId(), @@ -152,7 +152,7 @@ public void handleResponse(ReplicationResponse response) { @Override public void handleException(TransportException e) { timer.stop(); - logger.trace("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); + logger.debug("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); task.setPhase("finished"); taskManager.unregister(task); if (ExceptionsHelper.unwrap( diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 1f8f17f8e8d91..729b38ca27394 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -46,6 +46,7 @@ import org.opensearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus; import org.opensearch.action.search.SearchExecutionStatsCollector; import org.opensearch.action.search.SearchPhaseController; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.action.search.SearchTransportService; import org.opensearch.action.support.TransportAction; import org.opensearch.action.update.UpdateHelper; @@ -494,7 +495,7 @@ protected Node( for (ExtensionAwarePlugin extAwarePlugin : extensionAwarePlugins) { additionalSettings.addAll(extAwarePlugin.getExtensionSettings()); } - this.extensionsManager = new ExtensionsManager(additionalSettings); + this.extensionsManager = new ExtensionsManager(additionalSettings, identityService); } else { this.extensionsManager = new NoopExtensionsManager(); } @@ -761,6 +762,8 @@ protected Node( threadPool ); + final SearchRequestStats searchRequestStats = new SearchRequestStats(); + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); final IndicesService indicesService = new IndicesService( settings, @@ -786,6 +789,7 @@ protected Node( remoteDirectoryFactory, repositoriesServiceReference::get, fileCacheCleaner, + searchRequestStats, remoteStoreStatsTrackerFactory ); @@ -1199,6 +1203,7 @@ protected Node( 
b.bind(SystemIndices.class).toInstance(systemIndices); b.bind(IdentityService.class).toInstance(identityService); b.bind(Tracer.class).toInstance(tracer); + b.bind(SearchRequestStats.class).toInstance(searchRequestStats); b.bind(RemoteClusterStateService.class).toProvider(() -> remoteClusterStateService); b.bind(PersistedStateRegistry.class).toInstance(persistedStateRegistry); }); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 220d468a03090..490ebda24bf60 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -100,6 +100,7 @@ import org.opensearch.core.compress.Compressor; import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.core.compress.NotXContentException; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; import org.opensearch.core.util.BytesRefUtils; @@ -1125,7 +1126,11 @@ private void executeStaleShardDelete( new RemoteSegmentStoreDirectoryFactory( remoteStoreLockManagerFactory.getRepositoriesService(), threadPool - ).newDirectory(remoteStoreRepoForIndex, indexUUID, shardId).close(); + ).newDirectory( + remoteStoreRepoForIndex, + indexUUID, + new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.valueOf(shardId)) + ).close(); } } } @@ -1591,7 +1596,11 @@ private void executeOneStaleIndexDelete( new RemoteSegmentStoreDirectoryFactory( remoteStoreLockManagerFactory.getRepositoriesService(), threadPool - ).newDirectory(remoteStoreRepoForIndex, indexUUID, shardBlob.getKey()).close(); + ).newDirectory( + remoteStoreRepoForIndex, + indexUUID, + new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.valueOf(shardBlob.getKey())) + ).close(); } } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java index f37d53737d52b..e567fdd2d436c 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java @@ -10,6 +10,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; +import org.opensearch.OpenSearchException; import org.opensearch.search.profile.AbstractProfileBreakdown; import org.opensearch.search.profile.ContextualProfileBreakdown; @@ -73,14 +74,14 @@ public Map toBreakdownMap() { // If there are no leaf contexts, then return the default concurrent query level breakdown, which will include the // create_weight time/count queryNodeTime = createWeightTime; - maxSliceNodeTime = queryNodeTime; - minSliceNodeTime = queryNodeTime; - avgSliceNodeTime = queryNodeTime; + maxSliceNodeTime = 0L; + minSliceNodeTime = 0L; + avgSliceNodeTime = 0L; return buildDefaultQueryBreakdownMap(createWeightTime); } // first create the slice level breakdowns - final Map> sliceLevelBreakdowns = buildSliceLevelBreakdown(createWeightStartTime); + final Map> sliceLevelBreakdowns = buildSliceLevelBreakdown(); return buildQueryBreakdownMap(sliceLevelBreakdowns, createWeightTime, createWeightStartTime); } @@ -124,18 +125,19 @@ private Map buildDefaultQueryBreakdownMap(long 
createWeightTime) { /** * Computes the slice level breakdownMap. It uses sliceCollectorsToLeaves to figure out all the leaves or segments part of a slice. * Then it uses the breakdown timing stats for each of these leaves to calculate the breakdown stats at the slice level. - * @param createWeightStartTime start time when createWeight is called + * * @return map of collector (or slice) to breakdown map */ - Map> buildSliceLevelBreakdown(long createWeightStartTime) { + Map> buildSliceLevelBreakdown() { final Map> sliceLevelBreakdowns = new HashMap<>(); - long totalSliceNodeTime = 0; + long totalSliceNodeTime = 0L; for (Map.Entry> slice : sliceCollectorsToLeaves.entrySet()) { final Collector sliceCollector = slice.getKey(); // initialize each slice level breakdown final Map currentSliceBreakdown = sliceLevelBreakdowns.computeIfAbsent(sliceCollector, k -> new HashMap<>()); // max slice end time across all timing types long sliceMaxEndTime = Long.MIN_VALUE; + long sliceMinStartTime = Long.MAX_VALUE; for (QueryTimingType timingType : QueryTimingType.values()) { if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { // do nothing for create weight as that is query level time and not slice level @@ -155,6 +157,12 @@ Map> buildSliceLevelBreakdown(long createWeightStar // leaf, but the leaf level breakdown will not be created in the contexts map. // This is because before updating the contexts map, the query hits earlyTerminationException. // To handle such a case, we will ignore the leaf that is not present. + // + // Other than early termination, this can also happen in other cases. For example, consider a must boolean query + // with two clauses. If, while creating the scorer for the first clause, no docs are found for the field in a leaf + // context, a null scorer is returned. The weight for the second clause will then not create a scorer for that + // leaf context either (as it is a must query), so the leaf context ends up missing from the contexts map for + // the second clause's weight. 
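// Editor's note (illustrative, not part of this diff): because a slice's breakdown map may lack a
// timing key entirely (for example, a must clause that produced no scorer for a leaf, as described
// above), the hunks below read timings with getOrDefault() and MIN/MAX sentinels so the min/max
// fold stays well-defined. A minimal standalone sketch of that fold; all names are hypothetical.
import java.util.List;
import java.util.Map;

public class SliceTimingFoldSketch {
    /** Returns {minStart, maxEnd} across slices, or {0, 0} if no slice reported either key. */
    static long[] fold(List<Map<String, Long>> slices, String startKey, String endKey) {
        long minStart = Long.MAX_VALUE;
        long maxEnd = Long.MIN_VALUE;
        for (Map<String, Long> slice : slices) {
            // missing keys fall back to sentinels that leave the fold unchanged
            minStart = Math.min(minStart, slice.getOrDefault(startKey, Long.MAX_VALUE));
            maxEnd = Math.max(maxEnd, slice.getOrDefault(endKey, Long.MIN_VALUE));
        }
        if (minStart == Long.MAX_VALUE || maxEnd == Long.MIN_VALUE) {
            return new long[] { 0L, 0L };   // no slice reported this timing type
        }
        return new long[] { minStart, maxEnd };
    }
}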
continue; } final Map currentSliceLeafBreakdownMap = contexts.get(sliceLeaf).toBreakdownMap(); @@ -182,15 +190,36 @@ Map> buildSliceLevelBreakdown(long createWeightStar ); } // compute sliceMaxEndTime as max of sliceEndTime across all timing types - sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.get(timingTypeSliceEndTimeKey)); + sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, Long.MIN_VALUE)); + sliceMinStartTime = Math.min( + sliceMinStartTime, + currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE) + ); // compute total time for each timing type at slice level using sliceEndTime and sliceStartTime currentSliceBreakdown.put( timingType.toString(), - currentSliceBreakdown.get(timingTypeSliceEndTimeKey) - currentSliceBreakdown.get(timingTypeSliceStartTimeKey) + currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, 0L) - currentSliceBreakdown.getOrDefault( + timingTypeSliceStartTimeKey, + 0L + ) ); } - // currentSliceNodeTime includes the create weight time as well which will be same for all the slices - long currentSliceNodeTime = sliceMaxEndTime - createWeightStartTime; + // currentSliceNodeTime does not include the create weight time, as that is computed in non-concurrent part + long currentSliceNodeTime; + if (sliceMinStartTime == Long.MAX_VALUE && sliceMaxEndTime == Long.MIN_VALUE) { + currentSliceNodeTime = 0L; + } else if (sliceMinStartTime == Long.MAX_VALUE || sliceMaxEndTime == Long.MIN_VALUE) { + throw new OpenSearchException( + "Unexpected value of sliceMinStartTime [" + + sliceMinStartTime + + "] or sliceMaxEndTime [" + + sliceMaxEndTime + + "] while computing the slice level timing profile breakdowns" + ); + } else { + currentSliceNodeTime = sliceMaxEndTime - sliceMinStartTime; + } + // compute max/min slice times maxSliceNodeTime = Math.max(maxSliceNodeTime, currentSliceNodeTime); minSliceNodeTime = Math.min(minSliceNodeTime, currentSliceNodeTime); @@ -245,8 +274,8 @@ public Map buildQueryBreakdownMap( // for all other timing types, we will compute min/max/avg/total across slices for (Map.Entry> sliceBreakdown : sliceLevelBreakdowns.entrySet()) { - Long sliceBreakdownTypeTime = sliceBreakdown.getValue().get(timingTypeKey); - Long sliceBreakdownTypeCount = sliceBreakdown.getValue().get(timingTypeCountKey); + long sliceBreakdownTypeTime = sliceBreakdown.getValue().getOrDefault(timingTypeKey, 0L); + long sliceBreakdownTypeCount = sliceBreakdown.getValue().getOrDefault(timingTypeCountKey, 0L); // compute max/min/avg TimingType time across slices queryBreakdownMap.compute( maxBreakdownTypeTime, @@ -276,17 +305,38 @@ public Map buildQueryBreakdownMap( ); // query start/end time for a TimingType is min/max of start/end time across slices for that TimingType - queryTimingTypeEndTime = Math.max(queryTimingTypeEndTime, sliceBreakdown.getValue().get(sliceEndTimeForTimingType)); - queryTimingTypeStartTime = Math.min(queryTimingTypeStartTime, sliceBreakdown.getValue().get(sliceStartTimeForTimingType)); + queryTimingTypeEndTime = Math.max( + queryTimingTypeEndTime, + sliceBreakdown.getValue().getOrDefault(sliceEndTimeForTimingType, Long.MIN_VALUE) + ); + queryTimingTypeStartTime = Math.min( + queryTimingTypeStartTime, + sliceBreakdown.getValue().getOrDefault(sliceStartTimeForTimingType, Long.MAX_VALUE) + ); queryTimingTypeCount += sliceBreakdownTypeCount; } + + if (queryTimingTypeStartTime == Long.MAX_VALUE || queryTimingTypeEndTime == Long.MIN_VALUE) { + throw new 
OpenSearchException( + "Unexpected timing type [" + + timingTypeKey + + "] start [" + + queryTimingTypeStartTime + + "] or end time [" + + queryTimingTypeEndTime + + "] computed across slices for profile results" + ); + } queryBreakdownMap.put(timingTypeKey, queryTimingTypeEndTime - queryTimingTypeStartTime); queryBreakdownMap.put(timingTypeCountKey, queryTimingTypeCount); - queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0 : value / sliceLevelBreakdowns.size()); - queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 0 : value / sliceLevelBreakdowns.size()); + queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size()); + queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size()); // compute query end time using max of query end time across all timing types queryEndTime = Math.max(queryEndTime, queryTimingTypeEndTime); } + if (queryEndTime == Long.MIN_VALUE) { + throw new OpenSearchException("Unexpected error while computing the query end time across slices in profile result"); + } queryNodeTime = queryEndTime - createWeightStartTime; return queryBreakdownMap; } diff --git a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java index 39c34f7c0d5d5..86f6e542f97d1 100644 --- a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java @@ -183,7 +183,9 @@ CollectorManager createManager(CollectorManager( new TotalHitCountCollectorManager(sort), trackTotalHitsUpTo, diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java index d247d48389fc8..a93ce11f374fe 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java @@ -45,7 +45,7 @@ private SpanBuilder() { * @return context. 
*/ public static SpanCreationContext from(HttpRequest request) { - return new SpanCreationContext(createSpanName(request), buildSpanAttributes(request)); + return SpanCreationContext.server().name(createSpanName(request)).attributes(buildSpanAttributes(request)); } /** @@ -54,7 +54,7 @@ public static SpanCreationContext from(HttpRequest request) { * @return context */ public static SpanCreationContext from(RestRequest request) { - return new SpanCreationContext(createSpanName(request), buildSpanAttributes(request)); + return SpanCreationContext.client().name(createSpanName(request)).attributes(buildSpanAttributes(request)); } /** @@ -64,7 +64,7 @@ public static SpanCreationContext from(RestRequest request) { * @return context */ public static SpanCreationContext from(String action, Transport.Connection connection) { - return new SpanCreationContext(createSpanName(action, connection), buildSpanAttributes(action, connection)); + return SpanCreationContext.server().name(createSpanName(action, connection)).attributes(buildSpanAttributes(action, connection)); } private static String createSpanName(HttpRequest httpRequest) { diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java index d8cf39d4a4d09..b2308402379ac 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java @@ -10,7 +10,6 @@ import org.opensearch.common.annotation.InternalApi; import org.opensearch.telemetry.TelemetrySettings; -import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.telemetry.tracing.noop.NoopTracer; import java.io.IOException; @@ -41,17 +40,7 @@ public WrappedTracer(TelemetrySettings telemetrySettings, Tracer defaultTracer) @Override public Span startSpan(SpanCreationContext context) { - return startSpan(context.getSpanName(), context.getAttributes()); - } - - @Override - public Span startSpan(String spanName) { - return startSpan(spanName, Attributes.EMPTY); - } - - @Override - public Span startSpan(String spanName, Attributes attributes) { - return startSpan(spanName, (SpanContext) null, attributes); + return getDelegateTracer().startSpan(context); } @Override @@ -62,12 +51,7 @@ public SpanContext getCurrentSpan() { @Override public ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext) { - return startScopedSpan(spanCreationContext, null); - } - - @Override - public ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext, SpanContext parentSpan) { - return getDelegateTracer().startScopedSpan(spanCreationContext, parentSpan); + return getDelegateTracer().startScopedSpan(spanCreationContext); } @Override @@ -75,12 +59,6 @@ public SpanScope withSpanInScope(Span span) { return getDelegateTracer().withSpanInScope(span); } - @Override - public Span startSpan(String spanName, SpanContext parentSpan, Attributes attributes) { - Tracer delegateTracer = getDelegateTracer(); - return delegateTracer.startSpan(spanName, parentSpan, attributes); - } - @Override public void close() throws IOException { defaultTracer.close(); diff --git a/server/src/test/java/org/opensearch/action/ActionModuleTests.java b/server/src/test/java/org/opensearch/action/ActionModuleTests.java index deb0bb58a5c4d..8479f011adf48 100644 --- a/server/src/test/java/org/opensearch/action/ActionModuleTests.java +++ b/server/src/test/java/org/opensearch/action/ActionModuleTests.java @@ -143,7 
+143,7 @@ public void testSetupRestHandlerContainsKnownBuiltin() throws IOException { usageService, null, new IdentityService(Settings.EMPTY, new ArrayList<>()), - new ExtensionsManager(Set.of()) + new ExtensionsManager(Set.of(), new IdentityService(Settings.EMPTY, List.of())) ); actionModule.initRestHandlers(null); // At this point the easiest way to confirm that a handler is loaded is to try to register another one on top of it and to fail diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 672ebea8d01f9..e3f16463a5328 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.cluster.coordination.PendingClusterStateStats; import org.opensearch.cluster.coordination.PublishClusterStateStats; import org.opensearch.cluster.node.DiscoveryNode; @@ -801,8 +802,7 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { NodeIndicesStats indicesStats = null; if (remoteStoreStats) { - indicesStats = new NodeIndicesStats(new CommonStats(CommonStatsFlags.ALL), new HashMap<>()); - + indicesStats = new NodeIndicesStats(new CommonStats(CommonStatsFlags.ALL), new HashMap<>(), new SearchRequestStats()); RemoteSegmentStats remoteSegmentStats = indicesStats.getSegments().getRemoteSegmentStats(); remoteSegmentStats.addUploadBytesStarted(10L); remoteSegmentStats.addUploadBytesSucceeded(10L); diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 9ef0f85893fc8..0f67eff26cbde 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -154,6 +154,7 @@ private void indicesThatCannotBeCreatedTestCase( Settings.EMPTY, new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) ), + null, new SystemIndices(emptyMap()) ) { @Override diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 5410d6a88a5b9..515f6eae28a34 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -171,6 +171,7 @@ class TestTransportBulkAction extends TransportBulkAction { SETTINGS, new ClusterService(SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) ), + null, new SystemIndices(emptyMap()) ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java index 098bd8e8d8cfe..10cad6fb147a2 100644 --- 
a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java @@ -60,6 +60,7 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.VersionType; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.SystemIndices; import org.opensearch.telemetry.tracing.noop.NoopTracer; @@ -88,6 +89,7 @@ import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; public class TransportBulkActionTests extends OpenSearchTestCase { @@ -115,6 +117,7 @@ class TestTransportBulkAction extends TransportBulkAction { new Resolver(), new AutoCreateIndex(Settings.EMPTY, clusterService.getClusterSettings(), new Resolver(), new SystemIndices(emptyMap())), new IndexingPressureService(Settings.EMPTY, clusterService), + mock(IndicesService.class), new SystemIndices(emptyMap()) ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java index 829eee45cac5b..852e3837e1e7a 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java @@ -280,6 +280,7 @@ static class TestTransportBulkAction extends TransportBulkAction { indexNameExpressionResolver, autoCreateIndex, new IndexingPressureService(Settings.EMPTY, clusterService), + null, new SystemIndices(emptyMap()), relativeTimeProvider ); diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index 705479ec21fc1..f628bb3201452 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -38,8 +38,12 @@ import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.set.Sets; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -51,7 +55,10 @@ import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.junit.After; import org.junit.Before; @@ -65,6 +72,7 @@ import java.util.UUID; import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import 
java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -77,18 +85,21 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; public class AbstractSearchAsyncActionTests extends OpenSearchTestCase { private final List> resolvedNodes = new ArrayList<>(); private final Set releasedContexts = new CopyOnWriteArraySet<>(); private ExecutorService executor; + ThreadPool threadPool; @Before @Override public void setUp() throws Exception { super.setUp(); executor = Executors.newFixedThreadPool(1); + threadPool = new TestThreadPool(getClass().getName()); } @After @@ -97,6 +108,7 @@ public void tearDown() throws Exception { super.tearDown(); executor.shutdown(); assertTrue(executor.awaitTermination(1, TimeUnit.SECONDS)); + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); } private AbstractSearchAsyncAction createAction( @@ -126,6 +138,7 @@ private AbstractSearchAsyncAction createAction( final AtomicLong expected, final SearchShardIterator... shards ) { + final Runnable runnable; final TransportSearchAction.SearchTimeProvider timeProvider; if (controlled) { @@ -161,7 +174,8 @@ private AbstractSearchAsyncAction createAction( null, results, request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, SearchPhaseContext context) { @@ -313,6 +327,53 @@ public void testSendSearchResponseDisallowPartialFailures() { assertEquals(requestIds, releasedContexts); } + public void testOnPhaseFailureAndVerifyListeners() { + SearchRequestStats testListener = new SearchRequestStats(); + + final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); + action.start(); + assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); + action.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseName())); + + SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( + requestOperationListeners + ); + searchDfsQueryThenFetchAsyncAction.start(); + assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + searchDfsQueryThenFetchAsyncAction.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseName())); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + action.skipShard(searchShardIterator); + action.executeNextPhase(action, fetchPhase); + assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + action.onPhaseFailure(new SearchPhase("test") { + @Override + public 
void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(fetchPhase.getSearchPhaseName())); + } + public void testOnPhaseFailure() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); AtomicReference exception = new AtomicReference<>(); @@ -321,6 +382,7 @@ public void testOnPhaseFailure() { List> nodeLookups = new ArrayList<>(); ArraySearchPhaseResults phaseResults = phaseResults(requestIds, nodeLookups, 0); AbstractSearchAsyncAction action = createAction(searchRequest, phaseResults, listener, false, new AtomicLong()); + action.onPhaseFailure(new SearchPhase("test") { @Override public void run() { @@ -528,6 +590,215 @@ public void onFailure(Exception e) { assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); } + public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedException { + SearchRequestStats testListener = new SearchRequestStats(); + final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + + long delay = (randomIntBetween(1, 5)); + delay = delay * 10; + + SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); + action.start(); + + // Verify queryPhase current metric + assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + action.skipShard(searchShardIterator); + action.executeNextPhase(action, fetchPhase); + + // Verify queryPhase total, current and latency metrics + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertThat(testListener.getPhaseMetric(action.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(action.getSearchPhaseName())); + + // Verify fetchPhase current metric + assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + + ExpandSearchPhase expandPhase = createExpandSearchPhase(); + action.executeNextPhase(fetchPhase, expandPhase); + TimeUnit.MILLISECONDS.sleep(delay); + + // Verify fetchPhase total, current and latency metrics + assertThat(testListener.getPhaseMetric(fetchPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(fetchPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + + assertEquals(1, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); + + action.executeNextPhase(expandPhase, fetchPhase); + + action.sendSearchResponse(mock(InternalSearchResponse.class), mock(String.valueOf(QuerySearchResult.class))); + assertThat(testListener.getPhaseMetric(expandPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(expandPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); + } + + public void testOnPhaseListenersWithDfsType() throws InterruptedException { + SearchRequestStats testListener = new SearchRequestStats(); + final List 
requestOperationListeners = new ArrayList<>(List.of(testListener)); + + SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( + requestOperationListeners + ); + long delay = (randomIntBetween(1, 5)); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + searchDfsQueryThenFetchAsyncAction.start(); + assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + + searchDfsQueryThenFetchAsyncAction.skipShard(searchShardIterator); + searchDfsQueryThenFetchAsyncAction.executeNextPhase(searchDfsQueryThenFetchAsyncAction, fetchPhase); + + assertThat(testListener.getPhaseMetric(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + } + + private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAction( + List searchRequestOperationsListeners + ) { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); + Executor executor = OpenSearchExecutors.newDirectExecutorService(); + SearchShardIterator shards = new SearchShardIterator(null, null, Collections.emptyList(), null); + GroupShardsIterator shardsIter = new GroupShardsIterator<>(List.of(shards)); + QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task.getProgressListener(), + writableRegistry(), + shardsIter.size(), + exc -> {} + ); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + + return new SearchDfsQueryThenFetchAsyncAction( + logger, + null, + null, + null, + null, + null, + null, + executor, + resultConsumer, + searchRequest, + listener, + shardsIter, + null, + null, + task, + SearchResponse.Clusters.EMPTY, + new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger) + ); + } + + private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( + List searchRequestOperationsListeners + ) { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); + Executor executor = OpenSearchExecutors.newDirectExecutorService(); + SearchShardIterator shards = new SearchShardIterator(null, null, Collections.emptyList(), null); + GroupShardsIterator shardsIter = new GroupShardsIterator<>(List.of(shards)); + QueryPhaseResultConsumer resultConsumer 
= new QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task.getProgressListener(), + writableRegistry(), + shardsIter.size(), + exc -> {} + ); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + return new SearchQueryThenFetchAsyncAction( + logger, + null, + null, + null, + null, + null, + null, + executor, + resultConsumer, + searchRequest, + listener, + shardsIter, + null, + null, + task, + SearchResponse.Clusters.EMPTY, + new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger) + ) { + @Override + ShardSearchFailure[] buildShardFailures() { + return ShardSearchFailure.EMPTY_ARRAY; + } + + @Override + public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults) { + start(); + } + }; + } + + private FetchSearchPhase createFetchSearchPhase() { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); + QueryPhaseResultConsumer results = controller.newSearchPhaseResults( + OpenSearchExecutors.newDirectExecutorService(), + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 1, + exc -> {} + ); + return new FetchSearchPhase( + results, + controller, + null, + mockSearchPhaseContext, + (searchResponse, scrollId) -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + } + } + ); + } + + private ExpandSearchPhase createExpandSearchPhase() { + MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(null, null, null, null, false, null, 1); + return new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, null); + } + private static final class PhaseResult extends SearchPhaseResult { PhaseResult(ShardSearchContextId contextId) { this.contextId = contextId; diff --git a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 45f00a8418d5c..43029fe57d5dd 100644 --- a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -136,7 +136,8 @@ public void run() throws IOException { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -227,7 +228,8 @@ public void run() throws IOException { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -317,7 +319,8 @@ public void sendCanMatch( null, new ArraySearchPhaseResults<>(iter.size()), randomIntBetween(1, 32), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override @@ -344,7 +347,8 @@ protected void executePhaseOnShard( } } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -428,7 +432,8 @@ public void run() { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + 
SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -527,7 +532,8 @@ public void run() { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); diff --git a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java index e078b4a467e91..b5e1050b968ee 100644 --- a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java @@ -99,6 +99,11 @@ public SearchRequest getRequest() { return searchRequest; } + @Override + public SearchPhase getCurrentPhase() { + return null; + } + @Override public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults) { String scrollId = getRequest().scroll() != null ? TransportSearchHelper.buildScrollId(queryResults, Version.CURRENT) : null; diff --git a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java index 830fa99f90bb9..4b94b6589c6c8 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java @@ -135,7 +135,8 @@ public void testSkipSearchShards() throws InterruptedException { null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override @@ -253,7 +254,8 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override @@ -370,7 +372,8 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { TestSearchResponse response = new TestSearchResponse(); @@ -492,7 +495,8 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { TestSearchResponse response = new TestSearchResponse(); @@ -605,9 +609,9 @@ public void testAllowPartialResults() throws InterruptedException { null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { - @Override protected void executePhaseOnShard( SearchShardIterator shardIt, diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java index 0e2780c195cb8..6a22a7ea2b5e4 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -214,7 +214,8 @@ public void sendExecuteQuery( timeProvider, null, task, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override protected SearchPhase 
getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { @@ -226,6 +227,7 @@ public void run() { }; } }; + action.start(); latch.await(); assertThat(successfulOps.get(), equalTo(numShards)); diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java new file mode 100644 index 0000000000000..ef880043e863c --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchRequestOperationsListenerTests extends OpenSearchTestCase { + + public void testListenersAreExecuted() { + Map searchPhaseMap = new HashMap<>(); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + searchPhaseMap.put(searchPhaseName, new SearchRequestStats.StatsHolder()); + } + SearchRequestOperationsListener testListener = new SearchRequestOperationsListener() { + + @Override + public void onPhaseStart(SearchPhaseContext context) { + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); + } + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).total.inc(); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) { + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + } + }; + + int totalListeners = randomIntBetween(1, 10); + final List requestOperationListeners = new ArrayList<>(); + for (int i = 0; i < totalListeners; i++) { + requestOperationListeners.add(testListener); + } + + SearchRequestOperationsListener compositeListener = new SearchRequestOperationsListener.CompositeListener( + requestOperationListeners, + logger + ); + + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase searchPhase = mock(SearchPhase.class); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(ctx.getCurrentPhase()).thenReturn(searchPhase); + when(searchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + compositeListener.onPhaseStart(ctx); + assertEquals(totalListeners, searchPhaseMap.get(searchPhaseName).current.count()); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java new file mode 100644 index 0000000000000..f24147a8194b4 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchRequestStatsTests extends OpenSearchTestCase { + public void testSearchRequestPhaseFailure() { + SearchRequestStats testRequestStats = new SearchRequestStats(); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + testRequestStats.onPhaseStart(ctx); + assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); + testRequestStats.onPhaseFailure(ctx); + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } + + public void testSearchRequestStats() { + SearchRequestStats testRequestStats = new SearchRequestStats(); + + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + long tookTimeInMillis = randomIntBetween(1, 10); + testRequestStats.onPhaseStart(ctx); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); + testRequestStats.onPhaseEnd(ctx); + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + assertEquals(1, testRequestStats.getPhaseTotal(searchPhaseName)); + assertThat(testRequestStats.getPhaseMetric(searchPhaseName), greaterThanOrEqualTo(tookTimeInMillis)); + } + } + + public void testSearchRequestStatsOnPhaseStartConcurrently() throws InterruptedException { + SearchRequestStats testRequestStats = new SearchRequestStats(); + int numTasks = randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseStart(ctx); + countDownLatch.countDown(); + }); + threads[i].start(); + } + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(numTasks, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } + + public void testSearchRequestStatsOnPhaseEndConcurrently() throws InterruptedException { + SearchRequestStats testRequestStats = new SearchRequestStats(); + int numTasks = 
randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + Map searchPhaseNameLongMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + long tookTimeInMillis = randomIntBetween(1, 10); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseEnd(ctx); + countDownLatch.countDown(); + }); + threads[i].start(); + } + searchPhaseNameLongMap.put(searchPhaseName, tookTimeInMillis); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(numTasks, testRequestStats.getPhaseTotal(searchPhaseName)); + assertThat( + testRequestStats.getPhaseMetric(searchPhaseName), + greaterThanOrEqualTo((searchPhaseNameLongMap.get(searchPhaseName) * numTasks)) + ); + } + } + + public void testSearchRequestStatsOnPhaseFailureConcurrently() throws InterruptedException { + SearchRequestStats testRequestStats = new SearchRequestStats(); + int numTasks = randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseStart(ctx); + testRequestStats.onPhaseFailure(ctx); + countDownLatch.countDown(); + }); + threads[i].start(); + } + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } +} diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index d1c2dda615992..f37823d2c0c7d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -60,6 +60,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import static java.util.Collections.emptyMap; @@ -70,6 +71,9 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import 
static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class CoordinationStateTests extends OpenSearchTestCase { @@ -925,6 +929,7 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final VotingConfiguration initialConfig = VotingConfiguration.of(node1); final ClusterState clusterState = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L); + final String previousClusterUUID = "prev-cluster-uuid"; final ClusterMetadataManifest manifest = new ClusterMetadataManifest( 0L, 0L, @@ -934,13 +939,17 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep randomAlphaOfLength(10), false, Collections.emptyList(), - randomAlphaOfLength(10) + randomAlphaOfLength(10), + true ); - Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState)).thenReturn(manifest); + Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID)).thenReturn(manifest); final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, ps1); - persistedStateRegistry.addPersistedState(PersistedStateType.REMOTE, new RemotePersistedState(remoteClusterStateService)); + persistedStateRegistry.addPersistedState( + PersistedStateType.REMOTE, + new RemotePersistedState(remoteClusterStateService, previousClusterUUID) + ); String randomRepoName = "randomRepoName"; String stateRepoTypeAttributeKey = String.format( @@ -963,11 +972,28 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings); coordinationState.handlePrePublish(clusterState); - Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState); + Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState, previousClusterUUID); assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState(), equalTo(clusterState)); coordinationState.handlePreCommit(); - Mockito.verify(remoteClusterStateService, Mockito.times(1)).markLastStateAsCommitted(clusterState, manifest); + ClusterState committedClusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.metadata()).clusterUUIDCommitted(true).build()) + .build(); + ArgumentCaptor<ClusterState> clusterStateCaptor = ArgumentCaptor.forClass(ClusterState.class); + verify(remoteClusterStateService, times(1)).markLastStateAsCommitted(clusterStateCaptor.capture(), any()); + assertThat(clusterStateCaptor.getValue().metadata().indices(), equalTo(committedClusterState.metadata().indices())); + assertThat(clusterStateCaptor.getValue().metadata().clusterUUID(), equalTo(committedClusterState.metadata().clusterUUID())); + assertThat(clusterStateCaptor.getValue().stateUUID(), equalTo(committedClusterState.stateUUID())); + assertThat( + clusterStateCaptor.getValue().coordinationMetadata().term(), + equalTo(committedClusterState.coordinationMetadata().term()) + ); + assertThat(clusterStateCaptor.getValue().version(), equalTo(committedClusterState.version())); + assertThat( + clusterStateCaptor.getValue().metadata().clusterUUIDCommitted(), + 
equalTo(committedClusterState.metadata().clusterUUIDCommitted()) + ); } public static CoordinationState createCoordinationState( diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 8adaae6d230cd..e40826915c848 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -70,6 +70,8 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.translog.Translog; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidAliasNameException; import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.indices.ShardLimitValidator; @@ -132,8 +134,10 @@ import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; import static org.opensearch.node.Node.NODE_ATTRIBUTES; @@ -1725,6 +1729,87 @@ public void testRefreshIntervalValidationFailureWithIndexSetting() { ); } + public void testAnyTranslogDurabilityWhenRestrictSettingFalse() { + // This checks that aggregateIndexSettings works for the case when the cluster setting + // cluster.remote_store.index.restrict.async-durability is false or not set, it allows all types of durability modes + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + Translog.Durability durability = randomFrom(Translog.Durability.values()); + requestSettings.put(INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability); + request.settings(requestSettings.build()); + if (randomBoolean()) { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), false).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + assertFalse(clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING)); + assertEquals(durability, INDEX_TRANSLOG_DURABILITY_SETTING.get(indexSettings)); + } + + public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() { + // This checks that aggregateIndexSettings works for the case when the cluster setting + // cluster.remote_store.index.restrict.async-durability is false or not set, 
it allows all durability modes; this test sets it to true, so an async durability request must be rejected + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + requestSettings.put(INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC); + request.settings(requestSettings.build()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.builder().put("node.attr.remote_store.setting", "test").build(), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + // verify that the message is as expected + assertEquals( + "index setting [index.translog.durability=async] is not allowed as cluster setting [cluster.remote_store.index.restrict.async-durability=true]", + exception.getMessage() + ); + } + + public void testRequestDurabilityWhenRestrictSettingTrue() { + // This checks that aggregateIndexSettings works for the case when the cluster setting + // cluster.remote_store.index.restrict.async-durability is true: request-level durability is still allowed, since only async durability is restricted + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + requestSettings.put(INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST); + request.settings(requestSettings.build()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + assertTrue(clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING)); + assertEquals(Translog.Durability.REQUEST, INDEX_TRANSLOG_DURABILITY_SETTING.get(indexSettings)); + } + private IndexTemplateMetadata addMatchingTemplate(Consumer<IndexTemplateMetadata.Builder> configurator) { IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*"); configurator.accept(builder); diff --git a/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java new file mode 100644 index 0000000000000..947a4f9b1c9ab --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.blobstore; + +import org.opensearch.common.Randomness; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.DecryptedRangedStreamProvider; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.List; +import java.util.function.UnaryOperator; + +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class AsyncMultiStreamEncryptedBlobContainerTests extends OpenSearchTestCase { + + // Tests the happy path scenario for decrypting a read context + @SuppressWarnings("unchecked") + public void testReadBlobAsync() throws Exception { + String testBlobName = "testBlobName"; + int size = 100; + + // Mock objects needed for the test + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + CryptoHandler cryptoHandler = mock(CryptoHandler.class); + Object cryptoContext = mock(Object.class); + when(cryptoHandler.loadEncryptionMetadata(any())).thenReturn(cryptoContext); + when(cryptoHandler.estimateDecryptedLength(any(), anyLong())).thenReturn((long) size); + long[] adjustedRanges = { 0, size - 1 }; + DecryptedRangedStreamProvider rangedStreamProvider = new DecryptedRangedStreamProvider(adjustedRanges, UnaryOperator.identity()); + when(cryptoHandler.createDecryptingStreamOfRange(eq(cryptoContext), anyLong(), anyLong())).thenReturn(rangedStreamProvider); + + // Objects needed for API call + final byte[] data = new byte[size]; + Randomness.get().nextBytes(data); + final InputStreamContainer inputStreamContainer = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0); + final ListenerTestUtils.CountingCompletionListener completionListener = + new ListenerTestUtils.CountingCompletionListener<>(); + final ReadContext readContext = new ReadContext(size, List.of(inputStreamContainer), null); + + Mockito.doAnswer(invocation -> { + ActionListener readContextActionListener = invocation.getArgument(1); + readContextActionListener.onResponse(readContext); + return null; + }).when(blobContainer).readBlobAsync(eq(testBlobName), any()); + + AsyncMultiStreamEncryptedBlobContainer asyncMultiStreamEncryptedBlobContainer = + new AsyncMultiStreamEncryptedBlobContainer<>(blobContainer, cryptoHandler); + asyncMultiStreamEncryptedBlobContainer.readBlobAsync(testBlobName, completionListener); + + // Assert results + ReadContext response = completionListener.getResponse(); + assertEquals(0, completionListener.getFailureCount()); + assertEquals(1, completionListener.getResponseCount()); + assertNull(completionListener.getException()); + + assertTrue(response instanceof AsyncMultiStreamEncryptedBlobContainer.DecryptedReadContext); + assertEquals(1, response.getNumberOfParts()); + assertEquals(size, response.getBlobSize()); + + InputStreamContainer responseContainer = response.getPartStreams().get(0); + assertEquals(0, responseContainer.getOffset()); + assertEquals(size, responseContainer.getContentLength()); + assertEquals(100, 
responseContainer.getInputStream().available()); + } + + // Tests the exception scenario for decrypting a read context + @SuppressWarnings("unchecked") + public void testReadBlobAsyncException() throws Exception { + String testBlobName = "testBlobName"; + int size = 100; + + // Mock objects needed for the test + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + CryptoHandler cryptoHandler = mock(CryptoHandler.class); + when(cryptoHandler.loadEncryptionMetadata(any())).thenThrow(new IOException()); + + // Objects needed for API call + final byte[] data = new byte[size]; + Randomness.get().nextBytes(data); + final InputStreamContainer inputStreamContainer = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0); + final ListenerTestUtils.CountingCompletionListener completionListener = + new ListenerTestUtils.CountingCompletionListener<>(); + final ReadContext readContext = new ReadContext(size, List.of(inputStreamContainer), null); + + Mockito.doAnswer(invocation -> { + ActionListener readContextActionListener = invocation.getArgument(1); + readContextActionListener.onResponse(readContext); + return null; + }).when(blobContainer).readBlobAsync(eq(testBlobName), any()); + + AsyncMultiStreamEncryptedBlobContainer asyncMultiStreamEncryptedBlobContainer = + new AsyncMultiStreamEncryptedBlobContainer<>(blobContainer, cryptoHandler); + asyncMultiStreamEncryptedBlobContainer.readBlobAsync(testBlobName, completionListener); + + // Assert results + assertEquals(1, completionListener.getFailureCount()); + assertEquals(0, completionListener.getResponseCount()); + assertNull(completionListener.getResponse()); + assertTrue(completionListener.getException() instanceof IOException); + } + +} diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ListenerTestUtils.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ListenerTestUtils.java index 1e9450c83e3ab..a3a32f6db2148 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ListenerTestUtils.java +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ListenerTestUtils.java @@ -19,7 +19,7 @@ public class ListenerTestUtils { * CountingCompletionListener acts as a verification instance for wrapping listener based calls. * Keeps track of the last response, failure and count of response and failure invocations. */ - static class CountingCompletionListener implements ActionListener { + public static class CountingCompletionListener implements ActionListener { private int responseCount; private int failureCount; private T response; diff --git a/server/src/test/java/org/opensearch/core/RestStatusTests.java b/server/src/test/java/org/opensearch/core/RestStatusTests.java new file mode 100644 index 0000000000000..f8dba99aa8b60 --- /dev/null +++ b/server/src/test/java/org/opensearch/core/RestStatusTests.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.core; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.PriorityQueue; + +public class RestStatusTests extends OpenSearchTestCase { + + public void testStatusReturns200ForNoFailures() { + int totalShards = randomIntBetween(1, 100); + int successfulShards = randomIntBetween(1, totalShards); + + assertEquals(RestStatus.OK, RestStatus.status(successfulShards, totalShards)); + } + + public void testStatusReturns503ForUnavailableShards() { + int totalShards = randomIntBetween(1, 100); + int successfulShards = 0; + + assertEquals(RestStatus.SERVICE_UNAVAILABLE, RestStatus.status(successfulShards, totalShards)); + } + + public void testStatusReturnsFailureStatusWhenFailuresExist() { + int totalShards = randomIntBetween(1, 100); + int successfulShards = 0; + + TestException[] failures = new TestException[totalShards]; + PriorityQueue<TestException> heapOfFailures = new PriorityQueue<>((x, y) -> y.status().compareTo(x.status())); + + for (int i = 0; i < totalShards; ++i) { + /* + * The status of each failure does not need to convey a real failure and need + * not follow the REST contract; we are not testing the contract here, but + * whether status() returns the greatest REST status code among the given failures. + */ + RestStatus status = randomFrom(RestStatus.values()); + TestException failure = new TestException(status); + + failures[i] = failure; + heapOfFailures.add(failure); + } + + assertEquals(heapOfFailures.peek().status(), RestStatus.status(successfulShards, totalShards, failures)); + } + + public void testSerialization() throws IOException { + final RestStatus status = randomFrom(RestStatus.values()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + RestStatus.writeTo(out, status); + + try (StreamInput in = out.bytes().streamInput()) { + RestStatus deserializedStatus = RestStatus.readFrom(in); + + assertEquals(status, deserializedStatus); + } + } + } + + private static class TestException extends ShardOperationFailedException { + TestException(final RestStatus status) { + super("super-idx", randomInt(), "gone-fishing", status, new Throwable("cake")); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new IOException("not implemented"); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + throw new IOException("not implemented"); + } + } + +} diff --git a/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java b/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java index ca94a3b52a56c..daa42d3abbc50 100644 --- a/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java +++ b/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java @@ -27,6 +27,7 @@ public class InitializeExtensionRequestTests extends OpenSearchTestCase { public void testInitializeExtensionRequest() throws Exception { String expectedUniqueId = "test uniqueid"; Version expectedVersion = Version.fromString("2.0.0"); + String expectedServiceAccountHeader = "test"; ExtensionDependency expectedDependency = new ExtensionDependency(expectedUniqueId, 
expectedVersion); DiscoveryExtensionNode expectedExtensionNode = new DiscoveryExtensionNode( "firstExtension", @@ -46,9 +47,14 @@ public void testInitializeExtensionRequest() throws Exception { Version.CURRENT ); - InitializeExtensionRequest initializeExtensionRequest = new InitializeExtensionRequest(expectedSourceNode, expectedExtensionNode); + InitializeExtensionRequest initializeExtensionRequest = new InitializeExtensionRequest( + expectedSourceNode, + expectedExtensionNode, + expectedServiceAccountHeader + ); assertEquals(expectedExtensionNode, initializeExtensionRequest.getExtension()); assertEquals(expectedSourceNode, initializeExtensionRequest.getSourceNode()); + assertEquals(expectedServiceAccountHeader, initializeExtensionRequest.getServiceAccountHeader()); try (BytesStreamOutput out = new BytesStreamOutput()) { initializeExtensionRequest.writeTo(out); @@ -58,6 +64,7 @@ public void testInitializeExtensionRequest() throws Exception { assertEquals(expectedExtensionNode, initializeExtensionRequest.getExtension()); assertEquals(expectedSourceNode, initializeExtensionRequest.getSourceNode()); + assertEquals(expectedServiceAccountHeader, initializeExtensionRequest.getServiceAccountHeader()); } } } diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index 57883f1e5914a..f243a924f4e63 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -184,7 +184,7 @@ public void testLoadExtensions() throws Exception { Set> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet()); ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings); - ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings); + ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings, identityService); ExtensionDependency dependentExtension = new ExtensionDependency("uniqueid0", Version.fromString("2.0.0")); Extension firstExtension = new Extension( @@ -280,7 +280,7 @@ public void testNonUniqueLoadedExtensions() throws Exception { null, null ); - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); extensionsManager.loadExtension(firstExtension); IOException exception = expectThrows(IOException.class, () -> extensionsManager.loadExtension(secondExtension)); assertEquals( @@ -319,7 +319,7 @@ public void testNonUniqueLoadedExtensions() throws Exception { public void testMissingRequiredFieldsWhileLoadingExtension() throws Exception { Extension firstExtension = new Extension("firstExtension", "uniqueid1", "127.0.0.0", "9300", "0.0.7", "3.0.0", "", null, null); - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); IOException exception = expectThrows(IOException.class, () -> extensionsManager.loadExtension(firstExtension)); assertEquals("Required field [minimum opensearch version] is missing in the request", exception.getMessage()); @@ -376,7 +376,7 @@ public void testExtensionDependency() throws Exception { } public void testInitialize() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new 
ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); @@ -418,7 +418,7 @@ public void testInitialize() throws Exception { public void testHandleRegisterRestActionsRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; @@ -432,7 +432,7 @@ public void testHandleRegisterRestActionsRequest() throws Exception { } public void testHandleRegisterSettingsRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; @@ -448,7 +448,7 @@ public void testHandleRegisterSettingsRequest() throws Exception { } public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; @@ -463,7 +463,7 @@ public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Excep } public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; @@ -478,7 +478,7 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() th } public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; List actionsList = List.of("GET", "PUT /bar", "POST /baz"); @@ -492,7 +492,7 @@ public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exceptio } public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); @@ -506,7 +506,7 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throw } public void testHandleExtensionRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); ExtensionRequest clusterStateRequest = new ExtensionRequest(ExtensionRequestProto.RequestType.REQUEST_EXTENSION_CLUSTER_STATE); @@ -660,7 +660,7 @@ public void testEnvironmentSettingsDefaultValue() throws Exception { } public void testAddSettingsUpdateConsumerRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); List> componentSettings = List.of( @@ -704,7 +704,7 @@ public void 
testAddSettingsUpdateConsumerRequest() throws Exception { } public void testHandleAddSettingsUpdateConsumerRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); List> componentSettings = List.of( @@ -724,7 +724,7 @@ public void testHandleAddSettingsUpdateConsumerRequest() throws Exception { } public void testUpdateSettingsRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); Setting componentSetting = Setting.boolSetting("falseSetting", false, Property.Dynamic); @@ -753,7 +753,7 @@ public void testUpdateSettingsRequest() throws Exception { public void testRegisterHandler() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); TransportService mockTransportService = spy( new TransportService( @@ -781,7 +781,7 @@ public void testRegisterHandler() throws Exception { } public void testIncompatibleExtensionRegistration() throws IOException { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); Extension firstExtension = new Extension( "firstExtension", "uniqueid1", @@ -822,7 +822,7 @@ public List> getExtensionSettings() { extensionScopedSettings ); - ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings); + ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings, identityService); extensionsManager.loadExtension(firstExtension); DiscoveryExtensionNode extension = new DiscoveryExtensionNode( @@ -860,7 +860,7 @@ public void testAdditionalExtensionSettingsForExtensionWithoutCustomSettingSet() extensionScopedSettings ); - ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings); + ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings, identityService); extensionsManager.loadExtension(firstExtension); DiscoveryExtensionNode extension = new DiscoveryExtensionNode( diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java index 87767978147cd..cdddf8e9be1be 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java @@ -21,6 +21,7 @@ import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.extensions.ExtensionsManager; import org.opensearch.extensions.ExtensionsSettings; +import org.opensearch.identity.IdentityService; import org.opensearch.rest.RestRequest; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; @@ -118,7 +119,7 @@ public void testRestInitializeExtensionActionResponse() throws Exception { } public void testRestInitializeExtensionActionFailure() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), new IdentityService(Settings.EMPTY, List.of())); 
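// Aside: every call site in these test classes now threads an IdentityService into the
// ExtensionsManager constructor. A small test-local helper could absorb that repetition;
// the sketch below is hypothetical (the helper name and the Set<Setting<?>> element type
// are assumptions, only the two-argument constructor shape and the IdentityService
// construction are taken from the tests above):
//
//     private static ExtensionsManager newExtensionsManagerWithDefaultIdentity(Set<Setting<?>> additionalSettings) throws IOException {
//         return new ExtensionsManager(additionalSettings, new IdentityService(Settings.EMPTY, List.of()));
//     }
//
// so that each call site would read: ExtensionsManager extensionsManager = newExtensionsManagerWithDefaultIdentity(Set.of());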
RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(extensionsManager); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"\",\"hostAddress\":\"127.0.0.1\"," @@ -151,7 +152,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th Function.identity(), Setting.Property.ExtensionScope ); - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(boolSetting, stringSetting, intSetting, listSetting)); + ExtensionsManager extensionsManager = new ExtensionsManager( + Set.of(boolSetting, stringSetting, intSetting, listSetting), + new IdentityService(Settings.EMPTY, List.of()) + ); ExtensionsManager spy = spy(extensionsManager); // optionally, you can stub out some methods: @@ -198,7 +202,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing Function.identity(), Setting.Property.ExtensionScope ); - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(boolSetting, stringSetting, intSetting, listSetting)); + ExtensionsManager extensionsManager = new ExtensionsManager( + Set.of(boolSetting, stringSetting, intSetting, listSetting), + new IdentityService(Settings.EMPTY, List.of()) + ); ExtensionsManager spy = spy(extensionsManager); // optionally, you can stub out some methods: diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index 992e37b3f120a..ee36ea170e270 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -122,7 +122,7 @@ public void setup() throws Exception { usageService, null, new IdentityService(Settings.EMPTY, new ArrayList<>()), - new ExtensionsManager(Set.of()) + new ExtensionsManager(Set.of(), new IdentityService(Settings.EMPTY, List.of())) ); identityService = new IdentityService(Settings.EMPTY, new ArrayList<>()); dynamicActionRegistry = actionModule.getDynamicActionRegistry(); diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index 486717faaf864..c7ed1cb732154 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -716,10 +716,11 @@ Directory createDirectory(Path path) { public void testRemotePersistedState() throws IOException { final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder().clusterTerm(1L).stateVersion(5L).build(); - Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any())).thenReturn(manifest); + final String previousClusterUUID = "prev-cluster-uuid"; + Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())).thenReturn(manifest); Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(manifest); - CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); 
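// What markLastAcceptedStateAsCommitted is expected to do, in miniature: republish the
// accepted state with its cluster UUID flagged as committed. A minimal sketch of that
// transformation, assuming `accepted` stands for the last accepted ClusterState; the
// builder calls are the same ones CoordinationStateTests uses above, and the rest of the
// production logic is not shown:
//
//     ClusterState committed = ClusterState.builder(accepted)
//         .metadata(Metadata.builder(accepted.metadata()).clusterUUIDCommitted(true).build())
//         .build();
//     assert committed.metadata().clusterUUIDCommitted();
//
// The assertions that follow pin exactly this flag flip via getLastAcceptedState().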
assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); assertThat(remotePersistedState.getCurrentTerm(), equalTo(0L)); @@ -731,7 +732,7 @@ public void testRemotePersistedState() throws IOException { ); remotePersistedState.setLastAcceptedState(clusterState); - Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState, previousClusterUUID); assertThat(remotePersistedState.getLastAcceptedState(), equalTo(clusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); @@ -742,7 +743,7 @@ public void testRemotePersistedState() throws IOException { ); remotePersistedState.setLastAcceptedState(secondClusterState); - Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(secondClusterState); + Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(secondClusterState, previousClusterUUID); assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); @@ -752,14 +753,22 @@ public void testRemotePersistedState() throws IOException { assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); + assertThat(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted(), equalTo(false)); + final ClusterState thirdClusterState = ClusterState.builder(secondClusterState) + .metadata(Metadata.builder(secondClusterState.getMetadata()).clusterUUID(randomAlphaOfLength(10)).build()) + .build(); + remotePersistedState.setLastAcceptedState(thirdClusterState); + remotePersistedState.markLastAcceptedStateAsCommitted(); + assertThat(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted(), equalTo(true)); } public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOException { final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); - Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any()); + final String previousClusterUUID = "prev-cluster-uuid"; + Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any(), Mockito.any()); - CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); final long clusterTerm = randomNonNegativeLong(); final ClusterState clusterState = createClusterState( diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 9f8dde5ba9d45..66426c2a880a3 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -37,7 +37,8 @@ public void testClusterMetadataManifestXContent() throws IOException { "test-node-id", false, Collections.singletonList(uploadedIndexMetadata), - "prev-cluster-uuid" + "prev-cluster-uuid", + true ); final XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -60,7 +61,8 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { "B10RX1f5RJenMQvYccCgSQ", true, 
randomUploadedIndexMetadataList(), - "yfObdx8KSMKKrXf8UyHhM" + "yfObdx8KSMKKrXf8UyHhM", + true ); { // Mutate Cluster Term EqualsHashCodeTestUtils.checkEqualsAndHashCode( @@ -183,6 +185,21 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { ); } + { // Mutate cluster uuid committed + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.clusterUUIDCommitted(false); + return builder.build(); + } + ); + } } private List randomUploadedIndexMetadataList() { diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 9f5067420aab1..65166386733c6 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -135,7 +135,7 @@ public void teardown() throws Exception { public void testFailWriteFullMetadataNonClusterManagerNode() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().build(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)); Assert.assertThat(manifest, nullValue()); } @@ -169,7 +169,7 @@ public void testWriteFullMetadataSuccess() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); mockBlobStoreObjects(); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); List indices = List.of(uploadedIndexMetadata); @@ -190,6 +190,7 @@ public void testWriteFullMetadataSuccess() throws IOException { assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); } public void testWriteFullMetadataInParallelSuccess() throws IOException { @@ -205,7 +206,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { }).when(container).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); List indices = List.of(uploadedIndexMetadata); @@ -216,6 +217,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { 
.stateVersion(1L) .stateUUID("state-uuid") .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") .build(); assertThat(manifest.getIndices().size(), is(1)); @@ -226,6 +228,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 1); assertEquals(writeContextArgumentCaptor.getAllValues().size(), 1); @@ -266,7 +269,7 @@ public void testWriteFullMetadataInParallelFailure() throws IOException { remoteClusterStateService.start(); assertThrows( RemoteClusterStateService.IndexMetadataTransferException.class, - () -> remoteClusterStateService.writeFullMetadata(clusterState) + () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); } @@ -571,12 +574,43 @@ public void testGetValidPreviousClusterUUID() throws IOException { public void testGetValidPreviousClusterUUIDForInvalidChain() throws IOException { Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid2", "cluster-uuid1", - ClusterState.UNKNOWN_UUID, + "cluster-uuid3", + "cluster-uuid2", + "cluster-uuid5", + "cluster-uuid4" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + assertThrows(IllegalStateException.class, () -> remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")); + } + + public void testGetValidPreviousClusterUUIDWithMultipleChains() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid1", ClusterState.UNKNOWN_UUID, "cluster-uuid3", - "cluster-uuid2" + "cluster-uuid1" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid3")); + } + + public void testGetValidPreviousClusterUUIDWithInvalidMultipleChains() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + ClusterState.UNKNOWN_UUID ); mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); @@ -598,42 +632,92 @@ private void mockObjectsForGettingPreviousClusterUUID(Map<String, String> cluste when(blobContainer3.path()).thenReturn(blobPath); mockBlobContainerForClusterUUIDs(uuidBlobContainer, clusterUUIDsPointers.keySet()); + List<UploadedIndexMetadata> uploadedIndexMetadataList1 = List.of( + new UploadedIndexMetadata("index1", "index-uuid1", "key1"), + new UploadedIndexMetadata("index2", "index-uuid2", "key2") + ); final ClusterMetadataManifest clusterManifest1 = generateClusterMetadataManifest( "cluster-uuid1", clusterUUIDsPointers.get("cluster-uuid1"), - randomAlphaOfLength(10) + randomAlphaOfLength(10), + uploadedIndexMetadataList1 ); - mockBlobContainer(blobContainer1, clusterManifest1, Map.of()); + Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexMetadata indexMetadata1 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + IndexMetadata indexMetadata2 = 
IndexMetadata.builder("index2") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Map<String, IndexMetadata> indexMetadataMap1 = Map.of("index-uuid1", indexMetadata1, "index-uuid2", indexMetadata2); + mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1); + List<UploadedIndexMetadata> uploadedIndexMetadataList2 = List.of( + new UploadedIndexMetadata("index1", "index-uuid1", "key1"), + new UploadedIndexMetadata("index2", "index-uuid2", "key2") + ); final ClusterMetadataManifest clusterManifest2 = generateClusterMetadataManifest( "cluster-uuid2", clusterUUIDsPointers.get("cluster-uuid2"), - randomAlphaOfLength(10) + randomAlphaOfLength(10), + uploadedIndexMetadataList2 ); - mockBlobContainer(blobContainer2, clusterManifest2, Map.of()); + IndexMetadata indexMetadata3 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + IndexMetadata indexMetadata4 = IndexMetadata.builder("index2") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Map<String, IndexMetadata> indexMetadataMap2 = Map.of("index-uuid1", indexMetadata3, "index-uuid2", indexMetadata4); + mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2); + List<UploadedIndexMetadata> uploadedIndexMetadataList3 = List.of(new UploadedIndexMetadata("index1", "index-uuid1", "key1")); final ClusterMetadataManifest clusterManifest3 = generateClusterMetadataManifest( "cluster-uuid3", clusterUUIDsPointers.get("cluster-uuid3"), - randomAlphaOfLength(10) + randomAlphaOfLength(10), + uploadedIndexMetadataList3 ); - mockBlobContainer(blobContainer3, clusterManifest3, Map.of()); + IndexMetadata indexMetadata5 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Map<String, IndexMetadata> indexMetadataMap3 = Map.of("index-uuid1", indexMetadata5); + mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3); when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn( uuidBlobContainer, blobContainer1, blobContainer1, + blobContainer3, + blobContainer3, blobContainer2, blobContainer2, - blobContainer3, - blobContainer3 + blobContainer1, + blobContainer2, + blobContainer1, + blobContainer2 ); when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); } - private ClusterMetadataManifest generateClusterMetadataManifest(String clusterUUID, String previousClusterUUID, String stateUUID) { + private ClusterMetadataManifest generateClusterMetadataManifest( + String clusterUUID, + String previousClusterUUID, + String stateUUID, + List<UploadedIndexMetadata> uploadedIndexMetadata + ) { return ClusterMetadataManifest.builder() - .indices(List.of()) + .indices(uploadedIndexMetadata) .clusterTerm(1L) .stateVersion(1L) .stateUUID(stateUUID) @@ -642,6 +726,7 @@ private ClusterMetadataManifest generateClusterMetadataManifest(String clusterUU .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID(previousClusterUUID) .committed(true) + .clusterUUIDCommitted(true) .build(); } diff --git a/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java b/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java index cbc439041666f..614dacd457782 100644 --- a/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java +++ b/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java @@ -32,9 +32,18 @@ package org.opensearch.index; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.SegmentCommitInfo; +import 
org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.TieredMergePolicy; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; +import java.util.Collections; +import java.util.Set; + public class OpenSearchTieredMergePolicyTests extends OpenSearchTestCase { public void testDefaults() { @@ -80,4 +89,32 @@ public void testSetDeletesPctAllowed() { policy.setDeletesPctAllowed(42); assertEquals(42, policy.regularMergePolicy.getDeletesPctAllowed(), 0); } + + public void testFindDeleteMergesReturnsNullOnEmptySegmentInfos() throws IOException { + MergePolicy.MergeSpecification mergeSpecification = new OpenSearchTieredMergePolicy().findForcedDeletesMerges( + new SegmentInfos(Version.LATEST.major), + new MergePolicy.MergeContext() { + @Override + public int numDeletesToMerge(SegmentCommitInfo info) { + return 0; + } + + @Override + public int numDeletedDocs(SegmentCommitInfo info) { + return 0; + } + + @Override + public InfoStream getInfoStream() { + return InfoStream.NO_OUTPUT; + } + + @Override + public Set<SegmentCommitInfo> getMergingSegments() { + return Collections.emptySet(); + } + } + ); + assertNull(mergeSpecification); + } } diff --git a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java index 2160ab6220866..d0f26f3026789 100644 --- a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java @@ -47,10 +47,13 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import static org.opensearch.index.query.QueryBuilders.boolQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; @@ -456,4 +459,26 @@ public void testMustRewrite() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> boolQuery.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testVisit() { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.should(new TermQueryBuilder(TEXT_FIELD_NAME, "should")); + boolQueryBuilder.must(new TermQueryBuilder(TEXT_FIELD_NAME, "must1")); + boolQueryBuilder.must(new TermQueryBuilder(TEXT_FIELD_NAME, "must2")); // Add a second one to confirm that they both get visited + boolQueryBuilder.mustNot(new TermQueryBuilder(TEXT_FIELD_NAME, "mustNot")); + boolQueryBuilder.filter(new TermQueryBuilder(TEXT_FIELD_NAME, "filter")); + List<QueryBuilder> visitedQueries = new ArrayList<>(); + boolQueryBuilder.visit(createTestVisitor(visitedQueries)); + assertEquals(6, visitedQueries.size()); + Set<String> set = new HashSet<>(Arrays.asList("should", "must1", "must2", "mustNot", "filter")); + + for (QueryBuilder qb : visitedQueries) { + if (qb instanceof TermQueryBuilder) { + set.remove(((TermQueryBuilder) qb).value()); + } + } + + assertEquals(0, set.size()); + + } } diff --git a/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java index 94ded24975be4..66a02a02d4e5b 100644 --- a/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java @@ 
-38,6 +38,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.nullValue; @@ -153,4 +155,16 @@ public void testMustRewrite() throws IOException { e = expectThrows(IllegalStateException.class, () -> queryBuilder2.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testVisit() { + BoostingQueryBuilder builder = new BoostingQueryBuilder( + new TermQueryBuilder("unmapped_field", "value"), + new TermQueryBuilder(KEYWORD_FIELD_NAME, "other_value") + ); + + List visitedQueries = new ArrayList<>(); + builder.visit(createTestVisitor(visitedQueries)); + + assertEquals(3, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java index 2bfe964ce7259..527413d2513d0 100644 --- a/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java @@ -39,6 +39,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.nullValue; @@ -133,4 +135,12 @@ public void testMustRewrite() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> queryBuilder.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testVisit() { + ConstantScoreQueryBuilder queryBuilder = new ConstantScoreQueryBuilder(new TermQueryBuilder("unmapped_field", "foo")); + List visitorQueries = new ArrayList<>(); + queryBuilder.visit(createTestVisitor(visitorQueries)); + + assertEquals(2, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java index 8d589bd76f2bb..cb0df38de5c02 100644 --- a/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java @@ -41,6 +41,7 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; @@ -156,4 +157,14 @@ public void testRewriteMultipleTimes() throws IOException { assertEquals(rewrittenAgain, expected); assertEquals(Rewriteable.rewrite(dismax, createShardContext()), expected); } + + public void testVisit() { + DisMaxQueryBuilder dismax = new DisMaxQueryBuilder(); + dismax.add(new WrapperQueryBuilder(new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()).toString())); + + List visitedQueries = new ArrayList<>(); + dismax.visit(createTestVisitor(visitedQueries)); + + assertEquals(2, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java index 7e893a0947531..db0a7bf1795ff 100644 --- a/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java @@ -41,6 +41,8 @@ 
import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.opensearch.index.query.FieldMaskingSpanQueryBuilder.SPAN_FIELD_MASKING_FIELD; import static org.hamcrest.CoreMatchers.equalTo; @@ -147,4 +149,10 @@ public void testDeprecatedName() throws IOException { "Deprecated field [field_masking_span] used, expected [" + SPAN_FIELD_MASKING_FIELD.getPreferredName() + "] instead" ); } + + public void testVisit() { + List visitedQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitedQueries)); + assertEquals(2, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/QueryBuilderVisitorTests.java b/server/src/test/java/org/opensearch/index/query/QueryBuilderVisitorTests.java new file mode 100644 index 0000000000000..7849d3985ca59 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/query/QueryBuilderVisitorTests.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.test.AbstractBuilderTestCase; + +import java.util.ArrayList; +import java.util.List; + +public class QueryBuilderVisitorTests extends AbstractBuilderTestCase { + + public void testNoOpsVisitor() { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + + List visitedQueries = new ArrayList<>(); + QueryBuilderVisitor qbv = createTestVisitor(visitedQueries); + boolQueryBuilder.visit(qbv); + QueryBuilderVisitor subQbv = qbv.getChildVisitor(BooleanClause.Occur.MUST_NOT); + assertEquals(0, visitedQueries.size()); + assertEquals(qbv, subQbv); + } + + protected static QueryBuilderVisitor createTestVisitor(List visitedQueries) { + return QueryBuilderVisitor.NO_OP_VISITOR; + } +} diff --git a/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java index 320ff20b2ef5d..29d45fb42ffcc 100644 --- a/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java @@ -43,7 +43,9 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.hamcrest.CoreMatchers.instanceOf; @@ -140,4 +142,11 @@ public void testDisallowExpensiveQueries() { OpenSearchException e = expectThrows(OpenSearchException.class, () -> queryBuilder.toQuery(queryShardContext)); assertEquals("[script score] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage()); } + + public void testVisit() { + ScriptScoreQueryBuilder scriptQueryBuilder = doCreateTestQueryBuilder(); + List visitedQueries = new ArrayList<>(); + scriptQueryBuilder.visit(createTestVisitor(visitedQueries)); + assertEquals(2, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java index fff4369e155b3..2a5441b0ea932 100644 --- 
a/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java @@ -38,6 +38,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -192,4 +194,10 @@ public void testFromJsonWithNonDefaultBoostInLittleQuery() { equalTo("span_containing [little] as a nested span clause can't have non-default boost value [2.0]") ); } + + public void testVisit() { + List visitorQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitorQueries)); + assertEquals(3, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java index 781d2defcfd37..82bb77726b037 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java @@ -40,6 +40,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.opensearch.index.query.QueryBuilders.spanTermQuery; import static org.hamcrest.CoreMatchers.equalTo; @@ -128,4 +130,10 @@ public void testFromJsonWithNonDefaultBoostInMatchQuery() { Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json)); assertThat(exception.getMessage(), equalTo("span_first [match] as a nested span clause can't have non-default boost value [2.0]")); } + + public void testVisit() { + List visitorQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitorQueries)); + assertEquals(2, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java index e20d56c6aff2a..b4abff118802e 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -60,6 +60,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static java.util.Collections.singleton; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -306,4 +308,12 @@ public void testTopNMultiTermsRewriteInsideSpan() throws Exception { } } + + public void testVisit() { + MultiTermQueryBuilder multiTermQueryBuilder = new PrefixQueryBuilderTests().createTestQueryBuilder(); + SpanMultiTermQueryBuilder spanMultiTermQueryBuilder = new SpanMultiTermQueryBuilder(multiTermQueryBuilder); + List visitorQueries = new ArrayList<>(); + spanMultiTermQueryBuilder.visit(createTestVisitor(visitorQueries)); + assertEquals(2, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java index 8c9130d4b7bbd..c97b541da2199 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java @@ -41,7 +41,9 @@ import 
org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; +import java.util.List; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.either; @@ -231,4 +233,12 @@ public void testFromJsonWithNonDefaultBoostInInnerQuery() { Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json)); assertThat(exception.getMessage(), equalTo("span_near [clauses] as a nested span clause can't have non-default boost value [2.0]")); } + + public void testVisit() { + SpanTermQueryBuilder[] spanTermQueries = new SpanTermQueryBuilderTests().createSpanTermQueryBuilders(1); + SpanNearQueryBuilder spanNearQueryBuilder = new SpanNearQueryBuilder(spanTermQueries[0], 1); + List visitorQueries = new ArrayList<>(); + spanNearQueryBuilder.visit(createTestVisitor(visitorQueries)); + assertEquals(2, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java index b85c7cf81b0e1..ff42b271ffca5 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java @@ -40,6 +40,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.opensearch.index.query.QueryBuilders.spanNearQuery; import static org.opensearch.index.query.QueryBuilders.spanTermQuery; @@ -316,4 +318,10 @@ public void testFromJsonWithNonDefaultBoostInExcludeQuery() { Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json)); assertThat(exception.getMessage(), equalTo("span_not [exclude] as a nested span clause can't have non-default boost value [2.0]")); } + + public void testVisit() { + List visitedQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitedQueries)); + assertEquals(3, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java index 45323b5df74df..eb4b8fd486cb0 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java @@ -39,7 +39,9 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; +import java.util.List; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -137,4 +139,12 @@ public void testFromJsonWithNonDefaultBoostInInnerQuery() { Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json)); assertThat(exception.getMessage(), equalTo("span_or [clauses] as a nested span clause can't have non-default boost value [2.0]")); } + + public void testVisit() { + SpanTermQueryBuilder spanTermQueryBuilder = new SpanTermQueryBuilder("demo", "demo"); + SpanOrQueryBuilder spanOrQueryBuilder = new SpanOrQueryBuilder(spanTermQueryBuilder); + List visitedQueries = new ArrayList<>(); + spanOrQueryBuilder.visit(createTestVisitor(visitedQueries)); + assertEquals(2, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java 
b/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java index e8b6a21254ff8..74b955e872d51 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java @@ -38,6 +38,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -189,4 +191,10 @@ public void testFromJsonWithNonDefaultBoostInLittleQuery() { equalTo("span_within [little] as a nested span clause can't have non-default boost value [2.0]") ); } + + public void testVisit() { + List visitedQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitedQueries)); + assertEquals(3, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index f65b0e231b1dd..c27e4bf27327a 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -32,11 +32,20 @@ package org.opensearch.index.search.stats; +import org.opensearch.action.search.SearchPhase; +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.index.search.stats.SearchStats.Stats; import org.opensearch.test.OpenSearchTestCase; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SearchStatsTests extends OpenSearchTestCase { @@ -63,6 +72,37 @@ public void testShardLevelSearchGroupStats() throws Exception { // adding again would then return wrong search stats (would return 4! 
instead of 3) searchStats1.add(searchStats2); assertStats(groupStats1.get("group1"), 3); + + long paramValue = randomIntBetween(2, 50); + + // Testing for request stats + SearchRequestStats testRequestStats = new SearchRequestStats(); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(System.nanoTime() - TimeUnit.SECONDS.toNanos(paramValue)); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int iterator = 0; iterator < paramValue; iterator++) { + testRequestStats.onPhaseStart(ctx); + testRequestStats.onPhaseEnd(ctx); + } + } + searchStats1.setSearchRequestStats(testRequestStats); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals( + 0, + searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).current + ); + assertEquals( + paramValue, + searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).total + ); + assertThat( + searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).timeInMillis, + greaterThanOrEqualTo(paramValue) + ); + } } private static void assertStats(Stats stats, long equalTo) { @@ -87,5 +127,4 @@ private static void assertStats(Stats stats, long equalTo) { // avg_concurrency is not summed up across stats assertEquals(1, stats.getConcurrentAvgSliceCount(), 0); } - } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index cd272d8f626d0..b2eb41828a4df 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -1780,7 +1780,7 @@ public Set getPendingDeletions() throws IOException { } }; - try (Store store = createStore(shardId, new IndexSettings(metadata, Settings.EMPTY), directory)) { + try (Store store = createStore(shardId, new IndexSettings(metadata, Settings.EMPTY), directory, shardPath)) { IndexShard shard = newShard( shardRouting, shardPath, diff --git a/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java b/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java new file mode 100644 index 0000000000000..acf482552c260 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.concurrent.atomic.AtomicLong; + +public class IndexingStatsTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + IndexingStats stats = createTestInstance(); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + stats.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + IndexingStats deserializedStats = new IndexingStats(in); + + if (stats.getTotal() == null) { + assertNull(deserializedStats.getTotal()); + return; + } + + IndexingStats.Stats totalStats = stats.getTotal(); + IndexingStats.Stats deserializedTotalStats = deserializedStats.getTotal(); + + assertEquals(totalStats.getIndexCount(), deserializedTotalStats.getIndexCount()); + assertEquals(totalStats.getIndexTime(), deserializedTotalStats.getIndexTime()); + assertEquals(totalStats.getIndexCurrent(), deserializedTotalStats.getIndexCurrent()); + assertEquals(totalStats.getIndexFailedCount(), deserializedTotalStats.getIndexFailedCount()); + assertEquals(totalStats.getDeleteCount(), deserializedTotalStats.getDeleteCount()); + assertEquals(totalStats.getDeleteTime(), deserializedTotalStats.getDeleteTime()); + assertEquals(totalStats.getDeleteCurrent(), deserializedTotalStats.getDeleteCurrent()); + assertEquals(totalStats.getNoopUpdateCount(), deserializedTotalStats.getNoopUpdateCount()); + assertEquals(totalStats.isThrottled(), deserializedTotalStats.isThrottled()); + assertEquals(totalStats.getThrottleTime(), deserializedTotalStats.getThrottleTime()); + + if (totalStats.getDocStatusStats() == null) { + assertNull(deserializedTotalStats.getDocStatusStats()); + return; + } + + IndexingStats.Stats.DocStatusStats docStatusStats = totalStats.getDocStatusStats(); + IndexingStats.Stats.DocStatusStats deserializedDocStatusStats = deserializedTotalStats.getDocStatusStats(); + + assertTrue( + Arrays.equals( + docStatusStats.getDocStatusCounter(), + deserializedDocStatusStats.getDocStatusCounter(), + Comparator.comparingLong(AtomicLong::longValue) + ) + ); + } + } + } + + public void testToXContentForIndexingStats() throws IOException { + IndexingStats stats = createTestInstance(); + IndexingStats.Stats totalStats = stats.getTotal(); + AtomicLong[] counter = totalStats.getDocStatusStats().getDocStatusCounter(); + + String expected = "{\"indexing\":{\"index_total\":" + + totalStats.getIndexCount() + + ",\"index_time_in_millis\":" + + totalStats.getIndexTime().getMillis() + + ",\"index_current\":" + + totalStats.getIndexCurrent() + + ",\"index_failed\":" + + totalStats.getIndexFailedCount() + + ",\"delete_total\":" + + totalStats.getDeleteCount() + + ",\"delete_time_in_millis\":" + + totalStats.getDeleteTime().getMillis() + + ",\"delete_current\":" + + totalStats.getDeleteCurrent() + + ",\"noop_update_total\":" + + totalStats.getNoopUpdateCount() + + ",\"is_throttled\":" + + totalStats.isThrottled() + + ",\"throttle_time_in_millis\":" + + totalStats.getThrottleTime().getMillis() + + ",\"doc_status\":{\"1xx\":" + + counter[0] + + ",\"2xx\":" + + counter[1] + + ",\"3xx\":" + + 
counter[2] + + ",\"4xx\":" + + counter[3] + + ",\"5xx\":" + + counter[4] + + "}}}"; + + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + xContentBuilder.startObject(); + xContentBuilder = stats.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + xContentBuilder.endObject(); + + assertEquals(expected, xContentBuilder.toString()); + } + + private IndexingStats createTestInstance() { + IndexingStats.Stats.DocStatusStats docStatusStats = new IndexingStats.Stats.DocStatusStats(); + for (int i = 1; i < 6; ++i) { + docStatusStats.add(RestStatus.fromCode(i * 100), randomNonNegativeLong()); + } + + IndexingStats.Stats stats = new IndexingStats.Stats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomBoolean(), + randomNonNegativeLong(), + docStatusStats + ); + + return new IndexingStats(stats); + } + +} diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index c713ccdddd66a..415efd4ac23b6 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -152,7 +152,8 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { mock(RemoteDirectory.class), remoteMetadataDirectory, mock(RemoteStoreLockManager.class), - mock(ThreadPool.class) + mock(ThreadPool.class), + shardId ); FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory( new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory) diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 2948528ad82e1..8d99d98fbaaf4 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -8,6 +8,8 @@ package org.opensearch.index.store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; @@ -28,6 +30,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; @@ -46,6 +49,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.file.NoSuchFileException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -61,6 +65,8 @@ import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata; import static org.hamcrest.CoreMatchers.is; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.contains; import 
static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; @@ -73,6 +79,7 @@ import static org.mockito.Mockito.when; public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { + private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectoryTests.class); private RemoteDirectory remoteDataDirectory; private RemoteDirectory remoteMetadataDirectory; private RemoteStoreMetadataLockManager mdLockManager; @@ -93,13 +100,6 @@ public void setup() throws IOException { remoteMetadataDirectory = mock(RemoteDirectory.class); mdLockManager = mock(RemoteStoreMetadataLockManager.class); threadPool = mock(ThreadPool.class); - - remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( - remoteDataDirectory, - remoteMetadataDirectory, - mdLockManager, - threadPool - ); testUploadTracker = new TestUploadListener(); Settings indexSettings = Settings.builder() @@ -109,6 +109,13 @@ public void setup() throws IOException { ExecutorService executorService = OpenSearchExecutors.newDirectExecutorService(); indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDataDirectory, + remoteMetadataDirectory, + mdLockManager, + threadPool, + indexShard.shardId() + ); try (Store store = indexShard.store()) { segmentInfos = store.readLastCommittedSegmentsInfo(); } @@ -516,6 +523,114 @@ public void onFailure(Exception e) {} storeDirectory.close(); } + public void testCopyFilesToMultipart() throws Exception { + Settings settings = Settings.builder().build(); + FeatureFlags.initializeFeatureFlags(settings); + + String filename = "_0.cfe"; + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + Directory storeDirectory = mock(Directory.class); + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); + + Mockito.doAnswer(invocation -> { + ActionListener<String> completionListener = invocation.getArgument(3); + completionListener.onResponse(invocation.getArgument(0)); + return null; + }).when(blobContainer).asyncBlobDownload(any(), any(), any(), any()); + + CountDownLatch downloadLatch = new CountDownLatch(1); + ActionListener<String> completionListener = new ActionListener<>() { + @Override + public void onResponse(String unused) { + downloadLatch.countDown(); + } + + @Override + public void onFailure(Exception e) {} + }; + Path path = createTempDir(); + remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, path, completionListener); + assertTrue(downloadLatch.await(5000, TimeUnit.MILLISECONDS)); + verify(blobContainer, times(1)).asyncBlobDownload(contains(filename), eq(path.resolve(filename)), any(), any()); + verify(storeDirectory, times(0)).copyFrom(any(), any(), any(), any()); + } + + public void testCopyFilesTo() throws Exception { + String filename = "_0.cfe"; + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + Directory storeDirectory = mock(Directory.class); + CountDownLatch downloadLatch = new CountDownLatch(1); + ActionListener<String> completionListener = new ActionListener<>() { + @Override + public void onResponse(String unused) { + downloadLatch.countDown(); + } + + @Override + public void onFailure(Exception e) {} + }; + Path path = createTempDir(); + remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, path, completionListener); + assertTrue(downloadLatch.await(5000, TimeUnit.MILLISECONDS)); 
verify(storeDirectory, times(1)).copyFrom(any(), eq(filename), eq(filename), eq(IOContext.DEFAULT)); + } + + public void testCopyFilesToEmptyPath() throws Exception { + String filename = "_0.cfe"; + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + Directory storeDirectory = mock(Directory.class); + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); + + CountDownLatch downloadLatch = new CountDownLatch(1); + ActionListener completionListener = new ActionListener<>() { + @Override + public void onResponse(String unused) { + downloadLatch.countDown(); + } + + @Override + public void onFailure(Exception e) {} + }; + remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, null, completionListener); + assertTrue(downloadLatch.await(5000, TimeUnit.MILLISECONDS)); + verify(storeDirectory, times(1)).copyFrom(any(), eq(filename), eq(filename), eq(IOContext.DEFAULT)); + } + + public void testCopyFilesToException() throws Exception { + String filename = "_0.cfe"; + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + Directory storeDirectory = mock(Directory.class); + Mockito.doThrow(new IOException()) + .when(storeDirectory) + .copyFrom(any(Directory.class), anyString(), anyString(), any(IOContext.class)); + CountDownLatch downloadLatch = new CountDownLatch(1); + ActionListener completionListener = new ActionListener<>() { + @Override + public void onResponse(String unused) { + + } + + @Override + public void onFailure(Exception e) { + downloadLatch.countDown(); + } + }; + Path path = createTempDir(); + remoteSegmentStoreDirectory.copyTo(filename, storeDirectory, path, completionListener); + assertTrue(downloadLatch.await(5000, TimeUnit.MILLISECONDS)); + verify(storeDirectory, times(1)).copyFrom(any(), eq(filename), eq(filename), eq(IOContext.DEFAULT)); + } + public void testCopyFilesFromMultipartIOException() throws Exception { String filename = "_100.si"; AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); @@ -524,7 +639,8 @@ public void testCopyFilesFromMultipartIOException() throws Exception { remoteDataDirectory, remoteMetadataDirectory, mdLockManager, - threadPool + threadPool, + indexShard.shardId() ); populateMetadata(); diff --git a/server/src/test/java/org/opensearch/index/store/StoreTests.java b/server/src/test/java/org/opensearch/index/store/StoreTests.java index 8395b3e8ac08e..d7d326b325cc6 100644 --- a/server/src/test/java/org/opensearch/index/store/StoreTests.java +++ b/server/src/test/java/org/opensearch/index/store/StoreTests.java @@ -84,6 +84,7 @@ import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.shard.ShardPath; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; @@ -798,7 +799,7 @@ public void testOnCloseCallback() throws IOException { assertEquals(shardId, theLock.getShardId()); assertEquals(lock, theLock); count.incrementAndGet(); - }); + }, null); assertEquals(count.get(), 0); final int iters = randomIntBetween(1, 10); @@ -809,6 +810,26 @@ public void testOnCloseCallback() throws IOException { assertEquals(count.get(), 1); } + public void testStoreShardPath() { + final ShardId shardId = new ShardId("index", "_na_", 1); + final Settings 
settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(0)) + .build(); + final Path path = createTempDir().resolve(shardId.getIndex().getUUID()).resolve(String.valueOf(shardId.id())); + final ShardPath shardPath = new ShardPath(false, path, path, shardId); + final Store store = new Store( + shardId, + IndexSettingsModule.newIndexSettings("index", settings), + StoreTests.newDirectory(random()), + new DummyShardLock(shardId), + Store.OnClose.EMPTY, + shardPath + ); + assertEquals(shardPath, store.shardPath()); + store.close(); + } + public void testStoreStats() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); Settings settings = Settings.builder() diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index de1b2990f0a50..233d6f319b797 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -108,7 +108,6 @@ import static org.mockito.Mockito.when; @LuceneTestCase.SuppressFileSystems("ExtrasFS") - public class RemoteFsTranslogTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); @@ -621,6 +620,7 @@ public void testSimpleOperationsUpload() throws Exception { translog.setMinSeqNoToKeep(2); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); translog.trimUnreferencedReaders(); assertEquals(1, translog.readers.size()); assertEquals(1, translog.stats().estimatedNumberOfOperations()); diff --git a/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java b/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java index 9be45d4e77940..6f36d22b7e17b 100644 --- a/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java +++ b/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java @@ -32,6 +32,8 @@ package org.opensearch.indices; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.test.OpenSearchTestCase; @@ -43,7 +45,9 @@ public class NodeIndicesStatsTests extends OpenSearchTestCase { public void testInvalidLevel() { - final NodeIndicesStats stats = new NodeIndicesStats(null, Collections.emptyMap()); + CommonStats oldStats = new CommonStats(); + SearchRequestStats requestStats = new SearchRequestStats(); + final NodeIndicesStats stats = new NodeIndicesStats(oldStats, Collections.emptyMap(), requestStats); final String level = randomAlphaOfLength(16); final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stats.toXContent(null, params)); diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java index 065c90b262e11..f29ba3b0cea07 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java @@ 
-81,9 +81,9 @@ public void testBreakdownMapWithNoLeafContext() throws Exception { ); // verify total/min/max/avg node time is same as weight time assertEquals(createWeightTime, testQueryProfileBreakdown.toNodeTime()); - assertEquals(createWeightTime, testQueryProfileBreakdown.getMaxSliceNodeTime()); - assertEquals(createWeightTime, testQueryProfileBreakdown.getMinSliceNodeTime()); - assertEquals(createWeightTime, testQueryProfileBreakdown.getAvgSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getAvgSliceNodeTime()); continue; } assertEquals(0, (long) queryBreakDownMap.get(timingTypeKey)); @@ -103,16 +103,15 @@ public void testBuildSliceLevelBreakdownWithSingleSlice() throws Exception { final LeafReaderContext sliceLeaf = directoryReader.leaves().get(0); final Collector sliceCollector = mock(Collector.class); final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); - final Map leafProfileBreakdownMap = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); final AbstractProfileBreakdown leafProfileBreakdown = new TestQueryProfileBreakdown( QueryTimingType.class, leafProfileBreakdownMap ); testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector, sliceLeaf); testQueryProfileBreakdown.getContexts().put(sliceLeaf, leafProfileBreakdown); - final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown( - createWeightEarliestStartTime - ); + final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown(); assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); assertEquals(1, sliceBreakdownMap.size()); assertTrue(sliceBreakdownMap.containsKey(sliceCollector)); @@ -141,9 +140,9 @@ public void testBuildSliceLevelBreakdownWithSingleSlice() throws Exception { (long) sliceBreakdown.get(timingTypeKey + SLICE_END_TIME_SUFFIX) ); } - assertEquals(20, testQueryProfileBreakdown.getMaxSliceNodeTime()); - assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime()); - assertEquals(20, testQueryProfileBreakdown.getAvgSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getAvgSliceNodeTime()); directoryReader.close(); directory.close(); } @@ -154,8 +153,9 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { final Collector sliceCollector_1 = mock(Collector.class); final Collector sliceCollector_2 = mock(Collector.class); final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); - final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); - final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEarliestStartTime + 40, 10, 1); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 10, 1); final AbstractProfileBreakdown leafProfileBreakdown_1 = new 
@@ -154,8 +153,9 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception {
         final Collector sliceCollector_1 = mock(Collector.class);
         final Collector sliceCollector_2 = mock(Collector.class);
         final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime();
-        final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1);
-        final Map<String, Long> leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEarliestStartTime + 40, 10, 1);
+        final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming();
+        final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1);
+        final Map<String, Long> leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 10, 1);
         final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_1 = new TestQueryProfileBreakdown(
             QueryTimingType.class,
             leafProfileBreakdownMap_1
@@ -168,9 +168,7 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception {
         testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1));
         testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1);
         testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(1), leafProfileBreakdown_2);
-        final Map<Collector, Map<String, Long>> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown(
-            createWeightEarliestStartTime
-        );
+        final Map<Collector, Map<String, Long>> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown();
         assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty());
         assertEquals(2, sliceBreakdownMap.size());
@@ -208,9 +206,9 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception {
             }
         }
-        assertEquals(50, testQueryProfileBreakdown.getMaxSliceNodeTime());
-        assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime());
-        assertEquals(35, testQueryProfileBreakdown.getAvgSliceNodeTime());
+        assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime());
+        assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime());
+        assertEquals(10, testQueryProfileBreakdown.getAvgSliceNodeTime());
         directoryReader.close();
         directory.close();
     }
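Same arithmetic for the two-slice variant: each slice owns one leaf that reports 10 units, so once the shared createWeight time stops being folded in, max, min, and avg all land on 10 (they were 50/20/35 only because slice windows used to start at the createWeight start time):

    // two slices, one 10-unit leaf each, measured from the createWeight end time
    long[] sliceNodeTimes = { 10, 10 };
    // max = 10, min = 10, avg = (10 + 10) / 2 = 10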
@@ -221,8 +219,9 @@ public void testBreakDownMapWithMultipleSlices() throws Exception {
         final Collector sliceCollector_1 = mock(Collector.class);
         final Collector sliceCollector_2 = mock(Collector.class);
         final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime();
-        final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1);
-        final Map<String, Long> leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEarliestStartTime + 40, 20, 1);
+        final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming();
+        final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1);
+        final Map<String, Long> leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 20, 1);
         final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_1 = new TestQueryProfileBreakdown(
             QueryTimingType.class,
             leafProfileBreakdownMap_1
@@ -269,9 +268,67 @@ public void testBreakDownMapWithMultipleSlices() throws Exception {
             assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey));
         }
-        assertEquals(60, testQueryProfileBreakdown.getMaxSliceNodeTime());
-        assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime());
-        assertEquals(40, testQueryProfileBreakdown.getAvgSliceNodeTime());
+        assertEquals(20, testQueryProfileBreakdown.getMaxSliceNodeTime());
+        assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime());
+        assertEquals(15, testQueryProfileBreakdown.getAvgSliceNodeTime());
+        directoryReader.close();
+        directory.close();
+    }
+
+    public void testBreakDownMapWithMultipleSlicesAndOneSliceWithNoLeafContext() throws Exception {
+        final DirectoryReader directoryReader = getDirectoryReader(2);
+        final Directory directory = directoryReader.directory();
+        final Collector sliceCollector_1 = mock(Collector.class);
+        final Collector sliceCollector_2 = mock(Collector.class);
+        final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime();
+        final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming();
+        final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1);
+        final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_1 = new TestQueryProfileBreakdown(
+            QueryTimingType.class,
+            leafProfileBreakdownMap_1
+        );
+        testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0));
+        testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1));
+        testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1);
+        // leaf2 profile breakdown is not present in contexts map
+
+        Map<String, Long> queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap();
+        assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty());
+        assertEquals(66, queryBreakDownMap.size());
+
+        for (QueryTimingType queryTimingType : QueryTimingType.values()) {
+            String timingTypeKey = queryTimingType.toString();
+            String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX;
+
+            if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) {
+                final long createWeightTime = queryBreakDownMap.get(timingTypeKey);
+                assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime);
+                assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey));
+                // verify there is no min/max/avg for weight type stats
+                assertFalse(
+                    queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)
+                        || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey)
+                        || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)
+                        || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)
+                        || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey)
+                        || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)
+                );
+                continue;
+            }
+            assertEquals(10, (long) queryBreakDownMap.get(timingTypeKey));
+            assertEquals(10, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey));
+            assertEquals(5, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey));
+            assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey));
+            assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey));
+            assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey));
+            // min of 0 means one of the slices didn't work on any leaf context
+            assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey));
+            assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey));
+        }
+
+        assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime());
+        assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime());
+        assertEquals(5, testQueryProfileBreakdown.getAvgSliceNodeTime());
         directoryReader.close();
         directory.close();
     }
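Two pieces of arithmetic anchor these expectations. With per-slice leaf times of 10 and 20, the stats become max 20, min 10, avg (10 + 20) / 2 = 15. And the 66-entry map size is plain bookkeeping, assuming the nine QueryTimingType values: create_weight keeps only a time and a count, while each of the other eight types carries time, count, and max/min/avg of both:

    int createWeightKeys = 2;                              // time + count, no min/max/avg
    int perOtherType = 2 + 3 + 3;                          // time, count, max/min/avg of each
    int totalKeys = createWeightKeys + 8 * perOtherType;   // 2 + 64 = 66

In the one-empty-slice test the slice node times are {10, 0}: min drops to 0 because one slice never touched a leaf, and avg is (10 + 0) / 2 = 5.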
diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
index 3c31c979ce856..09f5c1bea1a5e 100644
--- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
+++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
@@ -2069,6 +2069,7 @@ public void onFailure(final Exception e) {
                 new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool),
                 repositoriesServiceReference::get,
                 fileCacheCleaner,
+                null,
                 new RemoteStoreStatsTrackerFactory(clusterService, settings)
             );
             final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings);
@@ -2209,6 +2210,7 @@ public void onFailure(final Exception e) {
                     indexNameExpressionResolver,
                     new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver, new SystemIndices(emptyMap())),
                     new IndexingPressureService(settings, clusterService),
+                    mock(IndicesService.class),
                     new SystemIndices(emptyMap())
                 )
             );
@@ -2297,7 +2299,8 @@ public void onFailure(final Exception e) {
                         namedWriteableRegistry,
                         List.of(),
                         client
-                    )
+                    ),
+                    null
                 )
             );
             actions.put(
diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java
index f202be70c9425..b27f888eaf502 100644
--- a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java
+++ b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java
@@ -48,10 +48,14 @@ public void testGetTracerWithUnavailableTracingTelemetryReturnsNoopTracer() {
         Tracer tracer = tracerFactory.getTracer();
         assertTrue(tracer instanceof NoopTracer);
-        assertTrue(tracer.startSpan("foo") == NoopSpan.INSTANCE);
-        assertTrue(tracer.startScopedSpan(new SpanCreationContext("foo", Attributes.EMPTY)) == ScopedSpan.NO_OP);
-        assertTrue(tracer.startScopedSpan(new SpanCreationContext("foo", Attributes.EMPTY)) == ScopedSpan.NO_OP);
-        assertTrue(tracer.withSpanInScope(tracer.startSpan("foo")) == SpanScope.NO_OP);
+        assertTrue(tracer.startSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == NoopSpan.INSTANCE);
+        assertTrue(tracer.startScopedSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == ScopedSpan.NO_OP);
+        assertTrue(tracer.startScopedSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == ScopedSpan.NO_OP);
+        assertTrue(
+            tracer.withSpanInScope(
+                tracer.startSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY))
+            ) == SpanScope.NO_OP
+        );
     }
 
     public void testGetTracerWithUnavailableTracingTelemetry() {
@@ -64,7 +68,7 @@ public void testGetTracerWithUnavailableTracingTelemetry() {
         Tracer tracer = tracerFactory.getTracer();
         assertTrue(tracer instanceof NoopTracer);
-        assertTrue(tracer.startScopedSpan(new SpanCreationContext("foo", Attributes.EMPTY)) == ScopedSpan.NO_OP);
+        assertTrue(tracer.startScopedSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == ScopedSpan.NO_OP);
     }
 
     public void testGetTracerWithAvailableTracingTelemetryReturnsWrappedTracer() {
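The telemetry tests above all migrate from the positional SpanCreationContext constructor to a fluent builder. Sticking to the builder methods the diff itself exercises (internal(), name(), attributes()), span creation now reads roughly like:

    SpanCreationContext context = SpanCreationContext.internal()
        .name("fetch_shard")                                            // hypothetical span name
        .attributes(Attributes.create().addAttribute("key", "value"));
    Span span = tracer.startSpan(context);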
diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java
index b70fe81d5f9c4..43e0cb8e44439 100644
--- a/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java
+++ b/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java
@@ -22,7 +22,6 @@
 import java.util.List;
 import java.util.Set;
 
-import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
@@ -36,9 +35,10 @@ public void testStartSpanWithTracingDisabledInvokesNoopTracer() throws Exception
         DefaultTracer mockDefaultTracer = mock(DefaultTracer.class);
 
         try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) {
-            wrappedTracer.startSpan("foo");
+            SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo");
+            wrappedTracer.startSpan(spanCreationContext);
             assertTrue(wrappedTracer.getDelegateTracer() instanceof NoopTracer);
-            verify(mockDefaultTracer, never()).startSpan("foo");
+            verify(mockDefaultTracer, never()).startSpan(SpanCreationContext.internal().name("foo"));
         }
     }
 
@@ -48,10 +48,11 @@ public void testStartSpanWithTracingEnabledInvokesDefaultTracer() throws Excepti
         DefaultTracer mockDefaultTracer = mock(DefaultTracer.class);
 
         try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) {
-            wrappedTracer.startSpan("foo");
+            SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo");
+            wrappedTracer.startSpan(spanCreationContext);
             assertTrue(wrappedTracer.getDelegateTracer() instanceof DefaultTracer);
-            verify(mockDefaultTracer).startSpan(eq("foo"), eq((SpanContext) null), any(Attributes.class));
+            verify(mockDefaultTracer).startSpan(eq(spanCreationContext));
         }
     }
 
@@ -61,10 +62,11 @@ public void testStartSpanWithTracingEnabledInvokesDefaultTracerWithAttr() throws
         DefaultTracer mockDefaultTracer = mock(DefaultTracer.class);
         Attributes attributes = Attributes.create().addAttribute("key", "value");
         try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) {
-            wrappedTracer.startSpan("foo", attributes);
+            SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo");
+            wrappedTracer.startSpan(spanCreationContext);
             assertTrue(wrappedTracer.getDelegateTracer() instanceof DefaultTracer);
-            verify(mockDefaultTracer).startSpan("foo", (SpanContext) null, attributes);
+            verify(mockDefaultTracer).startSpan(spanCreationContext);
         }
     }
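One detail of the new verification style: because the test hands the very same SpanCreationContext instance to the tracer, eq(spanCreationContext) matches whether or not SpanCreationContext overrides equals, so no argument captor is needed. The enabled-path check reduces to:

    SpanCreationContext ctx = SpanCreationContext.internal().name("foo");
    wrappedTracer.startSpan(ctx);
    verify(mockDefaultTracer).startSpan(eq(ctx));   // the same instance reaches the delegate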
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index 42de21875d291..4aad96eddaeb5 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -33,7 +33,7 @@ apply plugin: 'opensearch.java'
 group = 'hdfs'
 
 versions << [
-  'jetty': '9.4.51.v20230217'
+  'jetty': '9.4.52.v20230823'
 ]
 
 dependencies {
@@ -72,5 +72,5 @@ dependencies {
     exclude group: "com.squareup.okio"
   }
   runtimeOnly "com.squareup.okio:okio:3.5.0"
-  runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.3"
+  runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.4"
 }
diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
index 43f2cce668e81..4505570b8ebe0 100644
--- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
@@ -272,11 +272,11 @@ public Settings threadPoolSettings() {
     }
 
     protected Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException {
-        return createStore(shardPath.getShardId(), indexSettings, newFSDirectory(shardPath.resolveIndex()));
+        return createStore(shardPath.getShardId(), indexSettings, newFSDirectory(shardPath.resolveIndex()), shardPath);
     }
 
-    protected Store createStore(ShardId shardId, IndexSettings indexSettings, Directory directory) throws IOException {
-        return new Store(shardId, indexSettings, directory, new DummyShardLock(shardId));
+    protected Store createStore(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardPath shardPath) throws IOException {
+        return new Store(shardId, indexSettings, directory, new DummyShardLock(shardId), Store.OnClose.EMPTY, shardPath);
     }
 
     protected Releasable acquirePrimaryOperationPermitBlockingly(IndexShard indexShard) throws ExecutionException, InterruptedException {
@@ -654,7 +654,7 @@ protected IndexShard newShard(
             remotePath = createTempDir();
         }
 
-        remoteStore = createRemoteStore(remotePath, routing, indexMetadata);
+        remoteStore = createRemoteStore(remotePath, routing, indexMetadata, shardPath);
 
         remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, indexSettings.getSettings());
         BlobStoreRepository repo = createRepository(remotePath);
@@ -768,11 +768,12 @@ protected RepositoriesService createRepositoriesService() {
         return repositoriesService;
     }
 
-    protected Store createRemoteStore(Path path, ShardRouting shardRouting, IndexMetadata metadata) throws IOException {
+    protected Store createRemoteStore(Path path, ShardRouting shardRouting, IndexMetadata metadata, ShardPath shardPath)
+        throws IOException {
         Settings nodeSettings = Settings.builder().put("node.name", shardRouting.currentNodeId()).build();
         ShardId shardId = shardRouting.shardId();
         RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = createRemoteSegmentStoreDirectory(shardId, path);
-        return createStore(shardId, new IndexSettings(metadata, nodeSettings), remoteSegmentStoreDirectory);
+        return createStore(shardId, new IndexSettings(metadata, nodeSettings), remoteSegmentStoreDirectory, shardPath);
     }
 
     protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId shardId, Path path) throws IOException {
@@ -783,7 +784,7 @@ protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId
         RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager(
             new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex()))
         );
-        return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool);
+        return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool, shardId);
     }
 
     private RemoteDirectory newRemoteDirectory(Path f) throws IOException {
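The common thread in the IndexShardTestCase changes is that Store now carries its ShardPath (along with an explicit OnClose hook), so every helper threads one through rather than building a Store from a bare directory. A minimal test-style construction under that signature:

    ShardPath shardPath = new ShardPath(false, path, path, shardId);
    Store store = new Store(shardId, indexSettings, directory, new DummyShardLock(shardId), Store.OnClose.EMPTY, shardPath);
    assertEquals(shardPath, store.shardPath());   // the shard path is queryable from the store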
diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java
index 0cfa64df55659..7a8cf4963c4f1 100644
--- a/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java
@@ -35,6 +35,7 @@
 import com.carrotsearch.randomizedtesting.RandomizedTest;
 import com.carrotsearch.randomizedtesting.SeedUtils;
 
+import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.util.Accountable;
 import org.opensearch.Version;
@@ -69,6 +70,8 @@
 import org.opensearch.index.fielddata.IndexFieldDataService;
 import org.opensearch.index.mapper.MappedFieldType;
 import org.opensearch.index.mapper.MapperService;
+import org.opensearch.index.query.QueryBuilder;
+import org.opensearch.index.query.QueryBuilderVisitor;
 import org.opensearch.index.query.QueryShardContext;
 import org.opensearch.index.similarity.SimilarityService;
 import org.opensearch.indices.IndicesModule;
@@ -316,6 +319,20 @@ protected static QueryShardContext createShardContext() {
         return createShardContext(null);
     }
 
+    protected static QueryBuilderVisitor createTestVisitor(List<QueryBuilder> visitedQueries) {
+        return new QueryBuilderVisitor() {
+            @Override
+            public void accept(QueryBuilder qb) {
+                visitedQueries.add(qb);
+            }
+
+            @Override
+            public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) {
+                return this;
+            }
+        };
+    }
+
     private static class ClientInvocationHandler implements InvocationHandler {
 
         AbstractBuilderTestCase delegate;
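createTestVisitor is the test-side half of the new QueryBuilder visitor support: accept records each node it sees, and getChildVisitor returns the same visitor so every clause of a compound query is walked with one collector. Assuming QueryBuilder#visit(QueryBuilderVisitor) is the traversal entry point introduced alongside it, usage would look roughly like:

    List<QueryBuilder> visited = new ArrayList<>();
    QueryBuilder query = QueryBuilders.boolQuery()
        .must(QueryBuilders.termQuery("field", "value"));   // hypothetical query shape
    query.visit(createTestVisitor(visited));
    // visited now holds the bool query plus its nested term clause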
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
index 694a6a2b9e45b..4b661dc0ad0fe 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
@@ -10,6 +10,8 @@
 
 import org.opensearch.telemetry.tracing.AbstractSpan;
 import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.SpanCreationContext;
+import org.opensearch.telemetry.tracing.SpanKind;
 import org.opensearch.telemetry.tracing.attributes.Attributes;
 
 import java.util.HashMap;
@@ -29,6 +31,7 @@ public class MockSpan extends AbstractSpan {
     private boolean hasEnded;
     private final Long startTime;
    private Long endTime;
+    private final SpanKind spanKind;
 
     private final Object lock = new Object();
 
@@ -36,49 +39,43 @@ public class MockSpan extends AbstractSpan {
 
     /**
      * Base Constructor.
-     * @param spanName Span Name
-     * @param parentSpan Parent Span
-     * @param spanProcessor Span Processor
-     * @param attributes attributes
+     *
+     * @param spanCreationContext Span Creation context.
+     * @param parentSpan Parent Span
+     * @param spanProcessor Span Processor
      */
-    public MockSpan(String spanName, Span parentSpan, SpanProcessor spanProcessor, Attributes attributes) {
+    public MockSpan(SpanCreationContext spanCreationContext, Span parentSpan, SpanProcessor spanProcessor) {
         this(
-            spanName,
+            spanCreationContext.getSpanName(),
             parentSpan,
             parentSpan != null ? parentSpan.getTraceId() : IdGenerator.generateTraceId(),
             IdGenerator.generateSpanId(),
             spanProcessor,
-            attributes
-        );
-    }
-
-    /**
-     * Constructor.
-     * @param spanName span name.
-     * @param parentSpan parent span name
-     * @param spanProcessor span processor.
-     */
-    public MockSpan(String spanName, Span parentSpan, SpanProcessor spanProcessor) {
-        this(
-            spanName,
-            parentSpan,
-            parentSpan != null ? parentSpan.getTraceId() : IdGenerator.generateTraceId(),
-            IdGenerator.generateSpanId(),
-            spanProcessor,
-            Attributes.EMPTY
+            spanCreationContext.getAttributes(),
+            SpanKind.INTERNAL
         );
     }
 
     /**
      * Constructor with traceId and SpanIds
-     * @param spanName Span Name
-     * @param parentSpan Parent Span
-     * @param traceId Trace ID
-     * @param spanId Span ID
-     * @param spanProcessor Span Processor
-     * @param attributes attributes
+     *
+     * @param spanName Span Name
+     * @param parentSpan Parent Span
+     * @param traceId Trace ID
+     * @param spanId Span ID
+     * @param spanProcessor Span Processor
+     * @param attributes attributes
+     * @param spanKind type of span.
      */
-    public MockSpan(String spanName, Span parentSpan, String traceId, String spanId, SpanProcessor spanProcessor, Attributes attributes) {
+    public MockSpan(
+        String spanName,
+        Span parentSpan,
+        String traceId,
+        String spanId,
+        SpanProcessor spanProcessor,
+        Attributes attributes,
+        SpanKind spanKind
+    ) {
         super(spanName, parentSpan);
         this.spanProcessor = spanProcessor;
         this.metadata = new HashMap<>();
@@ -88,6 +85,7 @@ public MockSpan(String spanName, Span parentSpan, String traceId, String spanId,
         if (attributes != null) {
             this.metadata.putAll(attributes.getAttributesMap());
         }
+        this.spanKind = spanKind;
     }
 
     @Override
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java
index 7525b4424c243..6d0cd6d0b1290 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java
@@ -10,6 +10,7 @@
 
 import org.opensearch.core.common.Strings;
 import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.SpanKind;
 import org.opensearch.telemetry.tracing.TracingContextPropagator;
 import org.opensearch.telemetry.tracing.attributes.Attributes;
 
@@ -44,7 +45,7 @@ public Optional<Span> extract(Map<String, String> props) {
             String[] values = value.split(SEPARATOR);
             String traceId = values[0];
             String spanId = values[1];
-            return Optional.of(new MockSpan(null, null, traceId, spanId, spanProcessor, Attributes.EMPTY));
+            return Optional.of(new MockSpan(null, null, traceId, spanId, spanProcessor, Attributes.EMPTY, SpanKind.INTERNAL));
         } else {
             return Optional.empty();
         }
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
index c7f5943719230..39817a208bd18 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
@@ -9,9 +9,9 @@
 package org.opensearch.test.telemetry.tracing;
 
 import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.SpanCreationContext;
 import org.opensearch.telemetry.tracing.TracingContextPropagator;
 import org.opensearch.telemetry.tracing.TracingTelemetry;
-import org.opensearch.telemetry.tracing.attributes.Attributes;
 
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -29,8 +29,8 @@ public class MockTracingTelemetry implements TracingTelemetry {
 
     public MockTracingTelemetry() {}
 
     @Override
-    public Span createSpan(String spanName, Span parentSpan, Attributes attributes) {
-        Span span = new MockSpan(spanName, parentSpan, spanProcessor, attributes);
+    public Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) {
+        Span span = new MockSpan(spanCreationContext, parentSpan, spanProcessor);
         if (shutdown.get() == false) {
             spanProcessor.onStart(span);
         }