diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index ba1dc3dc3b5e6..f895dfc2c1f4d 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -22,34 +22,34 @@ jobs: timeout-minutes: 130 steps: - name: Checkout OpenSearch repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} - name: Setup environment variables (PR) if: github.event_name == 'pull_request_target' run: | - echo "pr_from_sha=$(jq --raw-output .pull_request.head.sha $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_from_clone_url=$(jq --raw-output .pull_request.head.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_to_clone_url=$(jq --raw-output .pull_request.base.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_title=$(jq --raw-output .pull_request.title $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_from_sha=$(jq --raw-output .pull_request.head.sha $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_from_clone_url=$(jq --raw-output .pull_request.head.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_to_clone_url=$(jq --raw-output .pull_request.base.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_title=$(jq --raw-output .pull_request.title $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - name: Setup environment variables (Push) if: github.event_name == 'push' run: | - repo_url="https://github.com/opensearch-project/OpenSearch" - ref_id=$(git rev-parse HEAD) - branch_name=$(git rev-parse --abbrev-ref HEAD) - echo "branch_name=$branch_name" >> $GITHUB_ENV - echo "pr_from_sha=$ref_id" >> $GITHUB_ENV - echo "pr_from_clone_url=$repo_url" >> $GITHUB_ENV - echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV - echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV - echo "pr_number=Null" >> $GITHUB_ENV + repo_url="https://github.com/opensearch-project/OpenSearch" + ref_id=$(git rev-parse HEAD) + branch_name=$(git rev-parse --abbrev-ref HEAD) + echo "branch_name=$branch_name" >> $GITHUB_ENV + echo "pr_from_sha=$ref_id" >> $GITHUB_ENV + echo "pr_from_clone_url=$repo_url" >> $GITHUB_ENV + echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV + echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV + echo "pr_number=Null" >> $GITHUB_ENV - name: Checkout opensearch-build repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: repository: opensearch-project/opensearch-build ref: main @@ -57,17 +57,17 @@ jobs: - name: Trigger jenkins workflow to run gradle check run: | - set -e - set -o pipefail - bash opensearch-build/scripts/gradle/gradle-check.sh ${{ secrets.JENKINS_GRADLE_CHECK_GENERIC_WEBHOOK_TOKEN }} | tee -a gradle-check.log + set -e + set -o pipefail + bash opensearch-build/scripts/gradle/gradle-check.sh ${{ secrets.JENKINS_GRADLE_CHECK_GENERIC_WEBHOOK_TOKEN }} | tee -a gradle-check.log - name: Setup Result Status if: always() run: | - WORKFLOW_URL=`cat gradle-check.log | grep 'WORKFLOW_URL' | awk '{print $2}'` - RESULT=`cat gradle-check.log | grep 'Result:' | awk '{print $2}'` - echo "workflow_url=$WORKFLOW_URL" >> $GITHUB_ENV - echo "result=$RESULT" >> $GITHUB_ENV + WORKFLOW_URL=`cat gradle-check.log | grep 'WORKFLOW_URL' | awk '{print $2}'` + RESULT=`cat gradle-check.log | grep 'Result:' | awk '{print $2}'` + echo 
"workflow_url=$WORKFLOW_URL" >> $GITHUB_ENV + echo "result=$RESULT" >> $GITHUB_ENV - name: Upload Coverage Report if: success() @@ -81,25 +81,25 @@ jobs: with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :white_check_mark: - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} + ### Gradle Check (Jenkins) Run Completed with: + * **RESULT:** ${{ env.result }} :white_check_mark: + * **URL:** ${{ env.workflow_url }} + * **CommitID:** ${{ env.pr_from_sha }} - name: Extract Test Failure if: ${{ github.event_name == 'pull_request_target' && env.result != 'SUCCESS' }} run: | - TEST_FAILURES=`curl -s "${{ env.workflow_url }}/testReport/api/json?tree=suites\[cases\[status,className,name\]\]" | jq -r '.. | objects | select(.status=="FAILED",.status=="REGRESSION") | (.className + "." + .name)' | uniq -c | sort -n -r | head -n 10` - if [[ "$TEST_FAILURES" != "" ]] - then - echo "test_failures<> $GITHUB_ENV - echo "" >> $GITHUB_ENV - echo "* **TEST FAILURES:**" >> $GITHUB_ENV - echo '```' >> $GITHUB_ENV - echo "$TEST_FAILURES" >> $GITHUB_ENV - echo '```' >> $GITHUB_ENV - echo "EOF" >> $GITHUB_ENV - fi + TEST_FAILURES=`curl -s "${{ env.workflow_url }}/testReport/api/json?tree=suites\[cases\[status,className,name\]\]" | jq -r '.. | objects | select(.status=="FAILED",.status=="REGRESSION") | (.className + "." + .name)' | uniq -c | sort -n -r | head -n 10` + if [[ "$TEST_FAILURES" != "" ]] + then + echo "test_failures<> $GITHUB_ENV + echo "" >> $GITHUB_ENV + echo "* **TEST FAILURES:**" >> $GITHUB_ENV + echo '```' >> $GITHUB_ENV + echo "$TEST_FAILURES" >> $GITHUB_ENV + echo '```' >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + fi - name: Create Comment Flaky if: ${{ github.event_name == 'pull_request_target' && success() && env.result != 'SUCCESS' }} @@ -119,12 +119,12 @@ jobs: with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :x: ${{ env.test_failures }} - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} - Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green. - Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change? + ### Gradle Check (Jenkins) Run Completed with: + * **RESULT:** ${{ env.result }} :x: ${{ env.test_failures }} + * **URL:** ${{ env.workflow_url }} + * **CommitID:** ${{ env.pr_from_sha }} + Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green. + Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change? 
- name: Create Issue On Push Failure if: ${{ github.event_name == 'push' && failure() }} diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index e264d65cdf191..8bbba657737c8 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -1,12 +1,12 @@ name: Gradle Precommit on: [pull_request] - + jobs: precommit: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, windows-latest, macos-latest] + os: [windows-latest, macos-latest] # precommit on ubuntu-latest is run as a part of the gradle-check workflow steps: - uses: actions/checkout@v2 - name: Set up JDK 11 diff --git a/.github/workflows/stalled.yml b/.github/workflows/stalled.yml new file mode 100644 index 0000000000000..0d03049a2e23c --- /dev/null +++ b/.github/workflows/stalled.yml @@ -0,0 +1,28 @@ +name: Close Stalled PRs +on: + schedule: + - cron: '15 15 * * *' # Run every day at 15:15 UTC / 7:15 PST / 8:15 PDT +permissions: + pull-requests: write +jobs: + stale: + runs-on: ubuntu-latest + steps: + - name: GitHub App token + id: github_app_token + uses: tibdex/github-app-token@v1.5.0 + with: + app_id: ${{ secrets.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + installation_id: 22958780 + - name: Stale PRs + uses: actions/stale@v8 + with: + repo-token: ${{ steps.github_app_token.outputs.token }} + stale-pr-label: 'stalled' + stale-pr-message: 'This PR is stalled because it has been open for 30 days with no activity. Remove stalled label or comment or this will be closed in 7 days.' + close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.' + days-before-pr-stale: 30 + days-before-pr-close: 7 + days-before-issue-stale: -1 + days-before-issue-close: -1 diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index eaf4d085c6946..fdf42a9a2731e 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -30,7 +30,11 @@ jobs: CURRENT_VERSION_ARRAY[2]=$((CURRENT_VERSION_ARRAY[2]+1)) NEXT_VERSION=$(IFS=. 
; echo "${CURRENT_VERSION_ARRAY[*]:0:3}") NEXT_VERSION_UNDERSCORE=$(IFS=_ ; echo "V_${CURRENT_VERSION_ARRAY[*]:0:3}") - NEXT_VERSION_ID=$(IFS=0 ; echo "${CURRENT_VERSION_ARRAY[*]:0:3}99") + if [[ ${#CURRENT_VERSION_ARRAY[2]} -gt 1 ]]; then + NEXT_VERSION_ID="${CURRENT_VERSION_ARRAY[0]:0:3}0${CURRENT_VERSION_ARRAY[1]:0:3}${CURRENT_VERSION_ARRAY[2]:0:3}99" + else + NEXT_VERSION_ID=$(IFS=0 ; echo "${CURRENT_VERSION_ARRAY[*]:0:3}99") + fi echo "TAG=$TAG" >> $GITHUB_ENV echo "BASE=$BASE" >> $GITHUB_ENV echo "BASE_X=$BASE_X" >> $GITHUB_ENV diff --git a/CHANGELOG.md b/CHANGELOG.md index cb67e56c7845f..99faefda9ad87 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 - Bump `forbiddenapis` from 3.3 to 3.4 -- Bump `avro` from 1.11.0 to 1.11.1 +- Bump `avro` from 1.11.1 to 1.11.2 - Bump `woodstox-core` from 6.3.0 to 6.3.1 - Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) - Bump `reactor-netty-core` from 1.0.19 to 1.0.22 ([#4447](https://github.com/opensearch-project/OpenSearch/pull/4447)) @@ -35,6 +35,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) - OpenJDK Update (April 2023 Patch releases) ([#7344](https://github.com/opensearch-project/OpenSearch/pull/7344) - Bump `com.google.http-client:google-http-client:1.43.2` from 1.42.0 to 1.43.2 ([7928](https://github.com/opensearch-project/OpenSearch/pull/7928))) +- Add Opentelemetry dependencies ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543)) +- Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) + + ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) @@ -44,6 +50,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Change http code for DecommissioningFailedException from 500 to 400 ([#5283](https://github.com/opensearch-project/OpenSearch/pull/5283)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) - Pass localNode info to all plugins on node start ([#7919](https://github.com/opensearch-project/OpenSearch/pull/7919)) +- Improved performance of parsing floating point numbers ([#7909](https://github.com/opensearch-project/OpenSearch/pull/7909)) +- Move span actions to Scope ([#8411](https://github.com/opensearch-project/OpenSearch/pull/8411)) ### Deprecated @@ -69,19 +77,27 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Support OpenSSL Provider with default Netty allocator 
([#5460](https://github.com/opensearch-project/OpenSearch/pull/5460)) - Replaces ZipInputStream with ZipFile to fix Zip Slip vulnerability ([#7230](https://github.com/opensearch-project/OpenSearch/pull/7230)) - Add missing validation/parsing of SearchBackpressureMode of SearchBackpressureSettings ([#7541](https://github.com/opensearch-project/OpenSearch/pull/7541)) +- Adds log4j configuration for telemetry LogSpanExporter ([#8393](https://github.com/opensearch-project/OpenSearch/pull/8393)) ### Security ## [Unreleased 2.x] ### Added +- [SearchPipeline] Add new search pipeline processor type, SearchPhaseResultsProcessor, that can modify the result of one search phase before starting the next phase. ([#7283](https://github.com/opensearch-project/OpenSearch/pull/7283)) - Add task cancellation monitoring service ([#7642](https://github.com/opensearch-project/OpenSearch/pull/7642)) - Add TokenManager Interface ([#7452](https://github.com/opensearch-project/OpenSearch/pull/7452)) - Add Remote store as a segment replication source ([#7653](https://github.com/opensearch-project/OpenSearch/pull/7653)) - Implement concurrent aggregations support without profile option ([#7514](https://github.com/opensearch-project/OpenSearch/pull/7514)) - Add dynamic index and cluster setting for concurrent segment search ([#7956](https://github.com/opensearch-project/OpenSearch/pull/7956)) - Add descending order search optimization through reverse segment read. ([#7967](https://github.com/opensearch-project/OpenSearch/pull/7967)) +- [Search pipelines] Added search pipelines output to node stats ([#8053](https://github.com/opensearch-project/OpenSearch/pull/8053)) - Update components of segrep backpressure to support remote store. 
([#8020](https://github.com/opensearch-project/OpenSearch/pull/8020)) - Make remote cluster connection setup async ([#8038](https://github.com/opensearch-project/OpenSearch/pull/8038)) +- Add API to initialize extensions ([#8029](https://github.com/opensearch-project/OpenSearch/pull/8029)) +- Add distributed tracing framework ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543)) +- Enable Point based optimization for custom comparators ([#8168](https://github.com/opensearch-project/OpenSearch/pull/8168)) +- [Extensions] Support extension additional settings with extension REST initialization ([#8414](https://github.com/opensearch-project/OpenSearch/pull/8414)) +- Adds mock implementation for TelemetryPlugin ([#7545](https://github.com/opensearch-project/OpenSearch/issues/7545)) ### Dependencies - Bump `com.azure:azure-storage-common` from 12.21.0 to 12.21.1 (#7566, #7814) @@ -94,13 +110,13 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `netty` from 4.1.91.Final to 4.1.93.Final ([#7901](https://github.com/opensearch-project/OpenSearch/pull/7901)) - Bump `com.amazonaws` 1.12.270 to `software.amazon.awssdk` 2.20.55 ([7372](https://github.com/opensearch-project/OpenSearch/pull/7372/)) - Add `org.reactivestreams` 1.0.4 ([7372](https://github.com/opensearch-project/OpenSearch/pull/7372/)) -- Bump `com.networknt:json-schema-validator` from 1.0.81 to 1.0.83 ([7968](https://github.com/opensearch-project/OpenSearch/pull/7968)) +- Bump `com.networknt:json-schema-validator` from 1.0.81 to 1.0.85 (#7968, #8255) - Bump `com.netflix.nebula:gradle-extra-configurations-plugin` from 9.0.0 to 10.0.0 in /buildSrc ([#7068](https://github.com/opensearch-project/OpenSearch/pull/7068)) - Bump `com.google.guava:guava` from 32.0.0-jre to 32.0.1-jre (#8009) - Bump `commons-io:commons-io` from 2.12.0 to 2.13.0 (#8014, #8013, #8010) - Bump `com.diffplug.spotless` from 6.18.0 to 6.19.0 (#8007) - Bump `com.azure:azure-storage-blob` to 12.22.2 from 12.21.1 ([#8043](https://github.com/opensearch-project/OpenSearch/pull/8043)) -- Bump `org.jruby.joni:joni` from 2.1.48 to 2.2.1 (#8015) +- Bump `org.jruby.joni:joni` from 2.1.48 to 2.2.1 (#8015, #8254) - Bump `com.google.guava:guava` from 32.0.0-jre to 32.0.1-jre ([#8011](https://github.com/opensearch-project/OpenSearch/pull/8011), [#8012](https://github.com/opensearch-project/OpenSearch/pull/8012), [#8107](https://github.com/opensearch-project/OpenSearch/pull/8107)) - Bump `io.projectreactor:reactor-core` from 3.4.18 to 3.5.6 in /plugins/repository-azure ([#8016](https://github.com/opensearch-project/OpenSearch/pull/8016)) - Bump `spock-core` from 2.1-groovy-3.0 to 2.3-groovy-3.0 ([#8122](https://github.com/opensearch-project/OpenSearch/pull/8122)) @@ -108,6 +124,15 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.netflix.nebula:gradle-info-plugin` from 12.1.3 to 12.1.4 (#8139) - Bump `commons-io:commons-io` from 2.12.0 to 2.13.0 in /plugins/discovery-azure-classic ([#8140](https://github.com/opensearch-project/OpenSearch/pull/8140)) - Bump `mockito` from 5.2.0 to 5.4.0 ([#8181](https://github.com/opensearch-project/OpenSearch/pull/8181)) +- Bump `netty` from 4.1.93.Final to 4.1.94.Final ([#8191](https://github.com/opensearch-project/OpenSearch/pull/8191)) +- Bump 
`org.apache.hadoop:hadoop-minicluster` from 3.3.5 to 3.3.6 (#8257) +- Bump `io.projectreactor.netty:reactor-netty-http` from 1.1.7 to 1.1.8 (#8256) +- [Upgrade] Lucene 9.7.0 release (#8272) +- Bump `org.jboss.resteasy:resteasy-jackson2-provider` from 3.0.26.Final to 6.2.4.Final in /qa/wildfly ([#8209](https://github.com/opensearch-project/OpenSearch/pull/8209)) +- Bump `com.google.api-client:google-api-client` from 1.34.0 to 2.2.0 ([#8276](https://github.com/opensearch-project/OpenSearch/pull/8276)) +- Update Apache HttpCore/ HttpClient and Apache HttpCore5 / HttpClient5 dependencies ([#8434](https://github.com/opensearch-project/OpenSearch/pull/8434)) +- Bump `org.apache.maven:maven-model` from 3.9.2 to 3.9.3 (#8403) +- Bump `io.projectreactor.netty:reactor-netty` and `io.projectreactor.netty:reactor-netty-core` from 1.1.7 to 1.1.8 (#8405) ### Changed - Replace jboss-annotations-api_1.2_spec with jakarta.annotation-api ([#7836](https://github.com/opensearch-project/OpenSearch/pull/7836)) @@ -122,19 +147,26 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Compress and cache cluster state during validate join request ([#7321](https://github.com/opensearch-project/OpenSearch/pull/7321)) - [Snapshot Interop] Add Changes in Create Snapshot Flow for remote store interoperability. ([#7118](https://github.com/opensearch-project/OpenSearch/pull/7118)) - Allow insecure string settings to warn-log usage and advise to migration of a newer secure variant ([#5496](https://github.com/opensearch-project/OpenSearch/pull/5496)) +- Add self-organizing hash table to improve the performance of bucket aggregations ([#7652](https://github.com/opensearch-project/OpenSearch/pull/7652)) +- Check UTF16 string size before converting to String to avoid OOME ([#7963](https://github.com/opensearch-project/OpenSearch/pull/7963)) +- Move ZSTD compression codecs out of the sandbox ([#7908](https://github.com/opensearch-project/OpenSearch/pull/7908)) + ### Deprecated ### Removed - Remove `COMPRESSOR` variable from `CompressorFactory` and use `DEFLATE_COMPRESSOR` instead ([7907](https://github.com/opensearch-project/OpenSearch/pull/7907)) +- Remove concurrency based minimum file cache size restriction ([#8294](https://github.com/opensearch-project/OpenSearch/pull/8294)) ### Fixed - Fixing error: adding a new/forgotten parameter to the configuration for checking the config on startup in plugins/repository-s3 #7924 - Enforce 512 byte document ID limit in bulk updates ([#8039](https://github.com/opensearch-project/OpenSearch/pull/8039)) - With only GlobalAggregation in request causes unnecessary wrapping with MultiCollector ([#8125](https://github.com/opensearch-project/OpenSearch/pull/8125)) - Fix mapping char_filter when mapping a hashtag ([#7591](https://github.com/opensearch-project/OpenSearch/pull/7591)) +- Fix NPE in multiterms aggregations involving empty buckets ([#7318](https://github.com/opensearch-project/OpenSearch/pull/7318)) +- Precise system clock time in MasterService debug logs ([#7902](https://github.com/opensearch-project/OpenSearch/pull/7902)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.8...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.8...2.x \ No 
newline at end of file diff --git a/benchmarks/src/main/java/org/opensearch/common/util/LongHashBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/util/LongHashBenchmark.java new file mode 100644 index 0000000000000..fa75dd2c91f5a --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/util/LongHashBenchmark.java @@ -0,0 +1,425 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; +import org.opensearch.common.lease.Releasable; + +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +@Fork(value = 3) +@Warmup(iterations = 1, time = 4) +@Measurement(iterations = 3, time = 2) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +public class LongHashBenchmark { + + @Benchmark + public void add(Blackhole bh, HashTableOptions tableOpts, WorkloadOptions workloadOpts) { + try (HashTable table = tableOpts.get(); WorkloadIterator iter = workloadOpts.iter()) { + while (iter.hasNext()) { + bh.consume(table.add(iter.next())); + } + } + } + + /** + * Creates a hash table with varying parameters. + */ + @State(Scope.Benchmark) + public static class HashTableOptions { + + @Param({ "LongHash", "ReorganizingLongHash" }) + public String type; + + @Param({ "1" }) + public long initialCapacity; + + @Param({ "0.6" }) + public float loadFactor; + + private Supplier<HashTable> supplier; + + @Setup + public void setup() { + switch (type) { + case "LongHash": + supplier = this::newLongHash; + break; + case "ReorganizingLongHash": + supplier = this::newReorganizingLongHash; + break; + default: + throw new IllegalArgumentException("invalid hash table type: " + type); + } + } + + public HashTable get() { + return supplier.get(); + } + + private HashTable newLongHash() { + return new HashTable() { + private final LongHash table = new LongHash(initialCapacity, loadFactor, BigArrays.NON_RECYCLING_INSTANCE); + + @Override + public long add(long key) { + return table.add(key); + } + + @Override + public void close() { + table.close(); + } + }; + } + + private HashTable newReorganizingLongHash() { + return new HashTable() { + private final ReorganizingLongHash table = new ReorganizingLongHash( + initialCapacity, + loadFactor, + BigArrays.NON_RECYCLING_INSTANCE + ); + + @Override + public long add(long key) { + return table.add(key); + } + + @Override + public void close() { + table.close(); + } + }; + } + } + + /** + * Creates a workload with varying parameters. + */ + @State(Scope.Benchmark) + public static class WorkloadOptions { + public static final int NUM_HITS = 20_000_000; + + /** + * Repeat the experiment with a growing number of keys. 
+ * These values are generated with an exponential growth pattern such that: + * value = ceil(previous_value * random_float_between(1.0, 1.14)) + */ + @Param({ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "13", + "15", + "17", + "18", + "19", + "20", + "21", + "23", + "26", + "27", + "30", + "32", + "35", + "41", + "45", + "50", + "53", + "54", + "55", + "57", + "63", + "64", + "69", + "74", + "80", + "84", + "91", + "98", + "101", + "111", + "114", + "124", + "128", + "139", + "148", + "161", + "162", + "176", + "190", + "204", + "216", + "240", + "257", + "269", + "291", + "302", + "308", + "327", + "341", + "374", + "402", + "412", + "438", + "443", + "488", + "505", + "558", + "612", + "621", + "623", + "627", + "642", + "717", + "765", + "787", + "817", + "915", + "962", + "1011", + "1083", + "1163", + "1237", + "1301", + "1424", + "1541", + "1716", + "1805", + "1817", + "1934", + "2024", + "2238", + "2281", + "2319", + "2527", + "2583", + "2639", + "2662", + "2692", + "2991", + "3201", + "3215", + "3517", + "3681", + "3710", + "4038", + "4060", + "4199", + "4509", + "4855", + "5204", + "5624", + "6217", + "6891", + "7569", + "8169", + "8929", + "9153", + "10005", + "10624", + "10931", + "12070", + "12370", + "13694", + "14227", + "15925", + "17295", + "17376", + "18522", + "19200", + "20108", + "21496", + "23427", + "24224", + "26759", + "29199", + "29897", + "32353", + "33104", + "36523", + "38480", + "38958", + "40020", + "44745", + "45396", + "47916", + "49745", + "49968", + "52231", + "53606" }) + public int size; + + @Param({ "correlated", "uncorrelated", "distinct" }) + public String dataset; + + private WorkloadIterator iterator; + + @Setup + public void setup() { + switch (dataset) { + case "correlated": + iterator = newCorrelatedWorkload(); + break; + case "uncorrelated": + iterator = newUncorrelatedWorkload(); + break; + case "distinct": + iterator = newDistinctWorkload(); + break; + default: + throw new IllegalArgumentException("invalid dataset: " + dataset); + } + } + + public WorkloadIterator iter() { + return iterator; + } + + /** + * Simulates monotonically increasing timestamp data with multiple hits mapping to the same key. + */ + private WorkloadIterator newCorrelatedWorkload() { + assert NUM_HITS >= size : "ensure hits >= size so that each key is used at least once"; + + final long[] data = new long[size]; + for (int i = 0; i < data.length; i++) { + data[i] = 1420070400000L + 3600000L * i; + } + + return new WorkloadIterator() { + private int count = 0; + private int index = 0; + private int remaining = NUM_HITS / data.length; + + @Override + public boolean hasNext() { + return count < NUM_HITS; + } + + @Override + public long next() { + if (--remaining <= 0) { + index = (index + 1) % data.length; + remaining = NUM_HITS / data.length; + } + count++; + return data[index]; + } + + @Override + public void reset() { + count = 0; + index = 0; + remaining = NUM_HITS / data.length; + } + }; + } + + /** + * Simulates uncorrelated data (such as travel distance / fare amount). 
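+     * (For reference: the values generated below are uniform doubles in [20.0, 100.0), bit-encoded
+     * to longs via Double.doubleToLongBits; the fixed random seed keeps runs reproducible.)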
+ */ + private WorkloadIterator newUncorrelatedWorkload() { + assert NUM_HITS >= size : "ensure hits >= size so that each key is used at least once"; + + final Random random = new Random(0); // fixed seed for reproducible results + final long[] data = new long[size]; + for (int i = 0; i < data.length; i++) { + data[i] = Double.doubleToLongBits(20.0 + 80 * random.nextDouble()); + } + + return new WorkloadIterator() { + private int count = 0; + private int index = 0; + + @Override + public boolean hasNext() { + return count < NUM_HITS; + } + + @Override + public long next() { + count++; + index = (index + 1) % data.length; + return data[index]; + } + + @Override + public void reset() { + count = 0; + index = 0; + } + }; + } + + /** + * Simulates workload with high cardinality, i.e., each hit mapping to a different key. + */ + private WorkloadIterator newDistinctWorkload() { + return new WorkloadIterator() { + private int count = 0; + + @Override + public boolean hasNext() { + return count < size; + } + + @Override + public long next() { + return count++; + } + + @Override + public void reset() { + count = 0; + } + }; + } + } + + private interface HashTable extends Releasable { + long add(long key); + } + + private interface WorkloadIterator extends Releasable { + boolean hasNext(); + + long next(); + + void reset(); + + @Override + default void close() { + reset(); + } + } +} diff --git a/build.gradle b/build.gradle index 4b188718e5f30..eec01e840149e 100644 --- a/build.gradle +++ b/build.gradle @@ -46,6 +46,7 @@ import org.gradle.plugins.ide.eclipse.model.EclipseJdt import org.gradle.plugins.ide.eclipse.model.SourceFolder import org.gradle.api.Project; import org.gradle.process.ExecResult; +import org.opensearch.gradle.CheckCompatibilityTask import static org.opensearch.gradle.util.GradleUtils.maybeConfigure @@ -643,3 +644,15 @@ tasks.withType(TestTaskReports).configureEach { tasks.named(JavaBasePlugin.CHECK_TASK_NAME) { dependsOn tasks.named('testAggregateTestReport', TestReport) } + +tasks.register('checkCompatibility', CheckCompatibilityTask) { + description('Checks the compatibility with child components') +} + +allprojects { project -> + project.afterEvaluate { + if (project.tasks.findByName('publishToMavenLocal')) { + checkCompatibility.dependsOn(project.tasks.publishToMavenLocal) + } + } +} diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 314d27602b4ad..47d4747d4b9a6 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -117,11 +117,12 @@ dependencies { api 'de.thetaphi:forbiddenapis:3.5.1' api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.12' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" - api 'org.apache.maven:maven-model:3.9.2' - api 'com.networknt:json-schema-validator:1.0.84' + api 'org.apache.maven:maven-model:3.9.3' + api 'com.networknt:json-schema-validator:1.0.85' api 'org.jruby.jcodings:jcodings:1.0.58' - api 'org.jruby.joni:joni:2.1.48' + api 'org.jruby.joni:joni:2.2.1' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" + api "org.ajoberstar.grgit:grgit-core:5.2.0" testFixturesApi "junit:junit:${props.getProperty('junit')}" testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy new file mode 100644 index 0000000000000..ee6446fec6d57 --- /dev/null +++ 
b/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy @@ -0,0 +1,108 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gradle + +import groovy.json.JsonSlurper +import org.ajoberstar.grgit.Grgit +import org.ajoberstar.grgit.operation.BranchListOp +import org.gradle.api.DefaultTask +import org.gradle.api.tasks.Input +import org.gradle.api.tasks.Internal +import org.gradle.api.tasks.TaskAction +import org.gradle.internal.os.OperatingSystem + +import java.nio.file.Paths + +class CheckCompatibilityTask extends DefaultTask { + + static final String REPO_URL = 'https://raw.githubusercontent.com/opensearch-project/opensearch-plugins/main/plugins/.meta' + + @Input + List repositoryUrls = project.hasProperty('repositoryUrls') ? project.property('repositoryUrls').split(',') : getRepoUrls() + + @Input + String ref = project.hasProperty('ref') ? project.property('ref') : 'main' + + @Internal + List failedComponents = [] + + @Internal + List gitFailedComponents = [] + + @Internal + List compatibleComponents = [] + + @TaskAction + void checkCompatibility() { + logger.info("Checking compatibility for: $repositoryUrls for $ref") + repositoryUrls.parallelStream().forEach { repositoryUrl -> + def tempDir = File.createTempDir() + try { + if (cloneAndCheckout(repositoryUrl, tempDir)) { + if (repositoryUrl.toString().endsWithAny('notifications', 'notifications.git')) { + tempDir = Paths.get(tempDir.getAbsolutePath(), 'notifications') + } + project.exec { + workingDir = tempDir + def stdout = new ByteArrayOutputStream() + executable = (OperatingSystem.current().isWindows()) ? 'gradlew.bat' : './gradlew' + args 'assemble' + standardOutput stdout + } + compatibleComponents.add(repositoryUrl) + } else { + logger.lifecycle("Skipping compatibility check for $repositoryUrl") + } + } catch (ex) { + failedComponents.add(repositoryUrl) + logger.info("Gradle assemble failed for $repositoryUrl", ex) + } finally { + tempDir.deleteDir() + } + } + if (!failedComponents.isEmpty()) { + logger.lifecycle("Incompatible components: $failedComponents") + logger.info("Compatible components: $compatibleComponents") + } + if (!gitFailedComponents.isEmpty()) { + logger.lifecycle("Components skipped due to git failures: $gitFailedComponents") + logger.info("Compatible components: $compatibleComponents") + } + if (!compatibleComponents.isEmpty()) { + logger.lifecycle("Compatible components: $compatibleComponents") + } + } + + protected static List getRepoUrls() { + def json = new JsonSlurper().parse(REPO_URL.toURL()) + def labels = json.projects.values() + return labels as List + } + + protected boolean cloneAndCheckout(repoUrl, directory) { + try { + def grgit = Grgit.clone(dir: directory, uri: repoUrl) + def remoteBranches = grgit.branch.list(mode: BranchListOp.Mode.REMOTE) + String targetBranch = 'origin/' + ref + if (remoteBranches.find { it.name == targetBranch } == null) { + gitFailedComponents.add(repoUrl) + logger.info("$ref does not exist for $repoUrl. 
Skipping the compatibility check!!") + return false + } else { + logger.info("Checking out $targetBranch") + grgit.checkout(branch: targetBranch) + return true + } + } catch (ex) { + logger.error('Exception occurred during GitHub operations', ex) + gitFailedComponents.add(repoUrl) + return false + } + } +} diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 84d3b2b814430..408b03e60cc5d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.7.0-snapshot-204acc3 +lucene = 9.7.0 bundled_jdk_vendor = adoptium bundled_jdk = 20.0.1+9 @@ -29,14 +29,14 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 -netty = 4.1.93.Final +netty = 4.1.94.Final joda = 2.12.2 # client dependencies -httpclient5 = 5.1.4 -httpcore5 = 5.1.5 -httpclient = 4.5.13 -httpcore = 4.4.15 +httpclient5 = 5.2.1 +httpcore5 = 5.2.2 +httpclient = 4.5.14 +httpcore = 4.4.16 httpasyncclient = 4.1.5 commonslogging = 1.2 commonscodec = 1.15 @@ -48,7 +48,7 @@ reactivestreams = 1.0.4 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli -bouncycastle=1.70 +bouncycastle=1.75 # test dependencies randomizedrunner = 2.7.1 junit = 4.13.2 @@ -64,3 +64,9 @@ jmh = 1.35 zstd = 1.5.5-3 jzlib = 1.1.3 + +resteasy = 6.2.4.Final + +# opentelemetry dependencies +opentelemetry = 1.26.0 + diff --git a/client/rest/licenses/httpclient5-5.1.4.jar.sha1 b/client/rest/licenses/httpclient5-5.1.4.jar.sha1 deleted file mode 100644 index 3c0cb1335fb88..0000000000000 --- a/client/rest/licenses/httpclient5-5.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -208f9eed6d6ab709e2ae7a75b457ef60c0baefa5 \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.2.1.jar.sha1 b/client/rest/licenses/httpclient5-5.2.1.jar.sha1 new file mode 100644 index 0000000000000..3555fe22f8e12 --- /dev/null +++ b/client/rest/licenses/httpclient5-5.2.1.jar.sha1 @@ -0,0 +1 @@ +0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.1.5.jar.sha1 b/client/rest/licenses/httpcore5-5.1.5.jar.sha1 deleted file mode 100644 index 8da253152e970..0000000000000 --- a/client/rest/licenses/httpcore5-5.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df9da3a1fa2351c4790245400ed28d78a8ddd3fc \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-5.2.2.jar.sha1 new file mode 100644 index 0000000000000..b641256c7d4a4 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.2.2.jar.sha1 @@ -0,0 +1 @@ +6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.1.5.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.1.5.jar.sha1 deleted file mode 100644 index 097e6cc2a3be8..0000000000000 --- a/client/rest/licenses/httpcore5-h2-5.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -624660339afd5006d427457e6b10b10b32fd86f1 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 new file mode 100644 index 0000000000000..94bc0fa49bdb0 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 @@ -0,0 +1 @@ +54ee1ed58fe8ac40be1083ea9873a6c734939ab9 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.1.4.jar.sha1 
b/client/sniffer/licenses/httpclient5-5.1.4.jar.sha1 deleted file mode 100644 index 3c0cb1335fb88..0000000000000 --- a/client/sniffer/licenses/httpclient5-5.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -208f9eed6d6ab709e2ae7a75b457ef60c0baefa5 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 new file mode 100644 index 0000000000000..3555fe22f8e12 --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 @@ -0,0 +1 @@ +0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.1.5.jar.sha1 b/client/sniffer/licenses/httpcore5-5.1.5.jar.sha1 deleted file mode 100644 index 8da253152e970..0000000000000 --- a/client/sniffer/licenses/httpcore5-5.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df9da3a1fa2351c4790245400ed28d78a8ddd3fc \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 new file mode 100644 index 0000000000000..b641256c7d4a4 --- /dev/null +++ b/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 @@ -0,0 +1 @@ +6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.7.0-snapshot-204acc3.jar.sha1 b/libs/core/licenses/lucene-core-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 2afe66c03cf22..0000000000000 --- a/libs/core/licenses/lucene-core-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3aa698cf90f074cbf24acfd7feaaad84c5a6f829 \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.7.0.jar.sha1 b/libs/core/licenses/lucene-core-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..2b0f77275c0ab --- /dev/null +++ b/libs/core/licenses/lucene-core-9.7.0.jar.sha1 @@ -0,0 +1 @@ +ad391210ffd806931334be9670a35af00c56f959 \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index c399f7f3e1b1c..8c0e247bc3169 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -89,7 +89,7 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_2_7_1 = new Version(2070199, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version V_2_8_0 = new Version(2080099, org.apache.lucene.util.Version.LUCENE_9_6_0); public static final Version V_2_8_1 = new Version(2080199, org.apache.lucene.util.Version.LUCENE_9_6_0); - public static final Version V_2_9_0 = new Version(2090099, org.apache.lucene.util.Version.LUCENE_9_6_0); + public static final Version V_2_9_0 = new Version(2090099, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version CURRENT = V_3_0_0; diff --git a/libs/core/src/main/java/org/opensearch/common/bytes/AbstractBytesReference.java b/libs/core/src/main/java/org/opensearch/common/bytes/AbstractBytesReference.java index 7b3c71321e4f0..043d45223498e 100644 --- a/libs/core/src/main/java/org/opensearch/common/bytes/AbstractBytesReference.java +++ b/libs/core/src/main/java/org/opensearch/common/bytes/AbstractBytesReference.java @@ -33,6 +33,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; +import org.apache.lucene.util.UnicodeUtil; import 
org.opensearch.common.io.stream.StreamInput; import org.opensearch.core.xcontent.XContentBuilder; @@ -49,6 +50,7 @@ public abstract class AbstractBytesReference implements BytesReference { private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculate it + private static final int MAX_UTF16_LENGTH = Integer.MAX_VALUE >> 1; @Override public int getInt(int index) { @@ -80,9 +82,19 @@ public void writeTo(OutputStream os) throws IOException { } } + protected int getMaxUTF16Length() { + return MAX_UTF16_LENGTH; + } + @Override public String utf8ToString() { - return toBytesRef().utf8ToString(); + BytesRef bytesRef = toBytesRef(); + final char[] ref = new char[bytesRef.length]; + final int len = UnicodeUtil.UTF8toUTF16(bytesRef, ref); + if (len > getMaxUTF16Length()) { + throw new IllegalArgumentException("UTF16 String size is " + len + ", should be less than " + getMaxUTF16Length()); + } + return new String(ref, 0, len); } @Override diff --git a/libs/telemetry/build.gradle b/libs/telemetry/build.gradle new file mode 100644 index 0000000000000..ce94698836b4f --- /dev/null +++ b/libs/telemetry/build.gradle @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +dependencies { + testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testImplementation "junit:junit:${versions.junit}" + testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" + testImplementation(project(":test:framework")) { + exclude group: 'org.opensearch', module: 'opensearch-telemetry' + } +} + +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java new file mode 100644 index 0000000000000..6f50699528b6b --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry; + +import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.tracing.TracingTelemetry; + +/** + * Interface defining telemetry + */ +public interface Telemetry { + + /** + * Provides tracing telemetry + * @return tracing telemetry instance + */ + TracingTelemetry getTracingTelemetry(); + + /** + * Provides metrics telemetry + * @return metrics telemetry instance + */ + MetricsTelemetry getMetricsTelemetry(); + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java new file mode 100644 index 0000000000000..fa3b7fd192f1a --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics; + +/** + * Interface for metrics telemetry providers + */ +public interface MetricsTelemetry { + +} diff --git a/server/src/main/java/org/opensearch/rest/extensions/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/package-info.java similarity index 58% rename from server/src/main/java/org/opensearch/rest/extensions/package-info.java rename to libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/package-info.java index 64b92e8b5c149..dfe17cc1c11ed 100644 --- a/server/src/main/java/org/opensearch/rest/extensions/package-info.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/package-info.java @@ -6,5 +6,7 @@ * compatible open source license. */ -/** REST classes for the extensions package. OpenSearch extensions provide extensibility to OpenSearch.*/ -package org.opensearch.rest.extensions; +/** + * Contains metrics related classes + */ +package org.opensearch.telemetry.metrics; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/package-info.java new file mode 100644 index 0000000000000..ad76f5e308bea --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains telemetry related classes + */ +package org.opensearch.telemetry; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java new file mode 100644 index 0000000000000..316edc971913e --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +/** + * Base span + */ +public abstract class AbstractSpan implements Span { + + /** + * name of the span + */ + private final String spanName; + /** + * span's parent span + */ + private final Span parentSpan; + + /** + * Base constructor + * @param spanName name of the span + * @param parentSpan span's parent span + */ + protected AbstractSpan(String spanName, Span parentSpan) { + this.spanName = spanName; + this.parentSpan = parentSpan; + } + + @Override + public Span getParentSpan() { + return parentSpan; + } + + @Override + public String getSpanName() { + return spanName; + } + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java new file mode 100644 index 0000000000000..58e9e0abad739 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing; + +import java.util.function.Consumer; + +/** + * Default implementation of Scope + */ +public class DefaultSpanScope implements SpanScope { + + private final Span span; + + private final Consumer<Span> onCloseConsumer; + + /** + * Creates Scope instance for the given span + * + * @param span underlying span + * @param onCloseConsumer consumer to execute on scope close + */ + public DefaultSpanScope(Span span, Consumer<Span> onCloseConsumer) { + this.span = span; + this.onCloseConsumer = onCloseConsumer; + } + + @Override + public void addSpanAttribute(String key, String value) { + span.addAttribute(key, value); + } + + @Override + public void addSpanAttribute(String key, long value) { + span.addAttribute(key, value); + } + + @Override + public void addSpanAttribute(String key, double value) { + span.addAttribute(key, value); + } + + @Override + public void addSpanAttribute(String key, boolean value) { + span.addAttribute(key, value); + } + + @Override + public void addSpanEvent(String event) { + span.addEvent(event); + } + + @Override + public void setError(Exception exception) { + span.setError(exception); + } + + /** + * Executes the runnable to end the scope + */ + @Override + public void close() { + onCloseConsumer.accept(span); + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java new file mode 100644 index 0000000000000..783edd238c1c2 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import java.io.Closeable; +import java.io.IOException; + +/** + * + * The default tracer implementation. 
It handles tracing context propagation between spans by maintaining + * current active span in its storage + * + */ +public class DefaultTracer implements Tracer { + static final String THREAD_NAME = "th_name"; + + private final TracingTelemetry tracingTelemetry; + private final TracerContextStorage<String, Span> tracerContextStorage; + + /** + * Creates DefaultTracer instance + * + * @param tracingTelemetry tracing telemetry instance + * @param tracerContextStorage storage used for storing current span context + */ + public DefaultTracer(TracingTelemetry tracingTelemetry, TracerContextStorage<String, Span> tracerContextStorage) { + this.tracingTelemetry = tracingTelemetry; + this.tracerContextStorage = tracerContextStorage; + } + + @Override + public SpanScope startSpan(String spanName) { + Span span = createSpan(spanName, getCurrentSpan()); + setCurrentSpanInContext(span); + addDefaultAttributes(span); + return new DefaultSpanScope(span, (scopeSpan) -> endSpan(scopeSpan)); + } + + @Override + public void close() throws IOException { + ((Closeable) tracingTelemetry).close(); + } + + // Visible for testing + Span getCurrentSpan() { + return tracerContextStorage.get(TracerContextStorage.CURRENT_SPAN); + } + + private void endSpan(Span span) { + if (span != null) { + span.endSpan(); + setCurrentSpanInContext(span.getParentSpan()); + } + } + + private Span createSpan(String spanName, Span parentSpan) { + return tracingTelemetry.createSpan(spanName, parentSpan); + } + + private void setCurrentSpanInContext(Span span) { + tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, span); + } + + /** + * Adds default attributes in the span + * @param span the current active span + */ + protected void addDefaultAttributes(Span span) { + span.addAttribute(THREAD_NAME, Thread.currentThread().getName()); + } + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java new file mode 100644 index 0000000000000..d60b4e60adece --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +/** + * An interface that represents a tracing span. + * Spans are created by the Tracer.startSpan method. + * Span must be ended by calling Tracer.endSpan which internally calls Span's endSpan. 
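+ * A minimal usage sketch (the tracer instance and names here are illustrative, not part of this API):
+ * <pre>
+ *     try (SpanScope scope = tracer.startSpan("fetch_shard")) {
+ *         scope.addSpanAttribute("index_name", "logs");
+ *     } // closing the scope ends the span via the owning Tracer
+ * </pre>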
+ */ +public interface Span { + + /** + * Ends the span + */ + void endSpan(); + + /** + * Returns span's parent span + */ + Span getParentSpan(); + + /** + * Returns the name of the {@link Span} + */ + String getSpanName(); + + /** + * Adds string type attribute in the span + * + * @param key of the attribute + * @param value value of the attribute + */ + void addAttribute(String key, String value); + + /** + * Adds long type attribute in the span + * + * @param key of the attribute + * @param value value of the attribute + */ + void addAttribute(String key, Long value); + + /** + * Adds double type attribute in the span + * + * @param key of the attribute + * @param value value of the attribute + */ + void addAttribute(String key, Double value); + + /** + * Adds boolean type attribute in the span + * + * @param key of the attribute + * @param value value of the attribute + */ + void addAttribute(String key, Boolean value); + + /** + * Records error in the span + * + * @param exception exception to be recorded + */ + void setError(Exception exception); + + /** + * Adds an event in the span + * + * @param event name of the event + */ + void addEvent(String event); + + /** + * Returns traceId of the span + * @return span's traceId + */ + String getTraceId(); + + /** + * Returns spanId of the span + * @return span's spanId + */ + String getSpanId(); + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java new file mode 100644 index 0000000000000..99d1bd3c93c84 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +/** + * Wrapper class to hold reference of Span + */ +public class SpanReference { + + private Span span; + + /** + * Creates the wrapper with given span + * @param span the span object to wrap + */ + public SpanReference(Span span) { + this.span = span; + } + + /** + * Returns the span object + * @return underlying span + */ + public Span getSpan() { + return span; + } + + /** + * Updates the underlying span + * @param span underlying span + */ + public void setSpan(Span span) { + this.span = span; + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java new file mode 100644 index 0000000000000..cf67165d889bc --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.telemetry.tracing.noop.NoopSpanScope; + +/** + * An auto-closeable that represents scope of the span. + * It provides interface for all the span operations. + */ +public interface SpanScope extends AutoCloseable { + /** + * No-op Scope implementation + */ + SpanScope NO_OP = new NoopSpanScope(); + + /** + * Adds string attribute to the {@link Span}. 
+ * + * @param key attribute key + * @param value attribute value + */ + void addSpanAttribute(String key, String value); + + /** + * Adds long attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addSpanAttribute(String key, long value); + + /** + * Adds double attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addSpanAttribute(String key, double value); + + /** + * Adds boolean attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addSpanAttribute(String key, boolean value); + + /** + * Adds an event to the {@link Span}. + * + * @param event event name + */ + void addSpanEvent(String event); + + /** + * Records error in the span + * + * @param exception exception to be recorded + */ + void setError(Exception exception); + + /** + * closes the scope + */ + @Override + void close(); +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java new file mode 100644 index 0000000000000..d422b58aa0a9f --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import java.io.Closeable; + +/** + * Tracer is the interface used to create a {@link Span} + * It automatically handles the context propagation between threads, tasks, nodes etc. + * + * All methods on the Tracer object are multi-thread safe. + */ +public interface Tracer extends Closeable { + + /** + * Starts the {@link Span} with given name + * + * @param spanName span name + * @return scope of the span, must be closed with explicit close or with try-with-resource + */ + SpanScope startSpan(String spanName); + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java new file mode 100644 index 0000000000000..eb93006835332 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java new file mode 100644 index 0000000000000..eb93006835332 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +/** + * Storage interface used for storing tracing context + * @param <K> key type + * @param <V> value type + */ +public interface TracerContextStorage<K, V> { + /** + * Key for storing current span + */ + String CURRENT_SPAN = "current_span"; + + /** + * Fetches value corresponding to key + * @param key of the tracing context + * @return value for key + */ + V get(K key); + + /** + * Puts tracing context value with key + * @param key of the tracing context + * @param value of the tracing context + */ + void put(K key, V value); +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java new file mode 100644 index 0000000000000..1152e3aedfa88 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import java.util.Map; +import java.util.function.BiConsumer; + +/** + * Interface defining the tracing related context propagation + */ +public interface TracingContextPropagator { + + /** + * Extracts current span from context + * @param props properties + * @return current span + */ + Span extract(Map<String, String> props); + + /** + * Injects tracing context + * + * @param currentSpan the current active span + * @param setter to add tracing context in map + */ + void inject(Span currentSpan, BiConsumer<String, String> setter); + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java new file mode 100644 index 0000000000000..16c76bd0cc141 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import java.io.Closeable; + +/** + * Interface for tracing telemetry providers + */ +public interface TracingTelemetry extends Closeable { + + /** + * Creates span with provided arguments + * @param spanName name of the span + * @param parentSpan span's parent span + * @return span instance + */ + Span createSpan(String spanName, Span parentSpan); + + /** + * Provides the tracing context propagator + * @return tracing context propagator instance + */ + TracingContextPropagator getContextPropagator(); + + /** + * Closes the resource + */ + void close(); + +}
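A sketch of how a transport layer might drive the propagator above (the headers map and the tracingTelemetry/currentSpan instances are assumptions; inject serializes the current span's context into carrier headers on the sender, extract rebuilds the remote parent on the receiver):

    Map<String, String> headers = new HashMap<>();
    TracingContextPropagator propagator = tracingTelemetry.getContextPropagator();
    propagator.inject(currentSpan, headers::put);    // sender: write context into outgoing headers
    Span remoteParent = propagator.extract(headers); // receiver: recover the parent span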
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java new file mode 100644 index 0000000000000..c0dbaf65ba48b --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.noop; + +import org.opensearch.telemetry.tracing.SpanScope; + +/** + * No-op implementation of SpanScope + */ +public final class NoopSpanScope implements SpanScope { + + /** + * No-args constructor + */ + public NoopSpanScope() {} + + @Override + public void addSpanAttribute(String key, String value) { + + } + + @Override + public void addSpanAttribute(String key, long value) { + + } + + @Override + public void addSpanAttribute(String key, double value) { + + } + + @Override + public void addSpanAttribute(String key, boolean value) { + + } + + @Override + public void addSpanEvent(String event) { + + } + + @Override + public void setError(Exception exception) { + + } + + @Override + public void close() { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java new file mode 100644 index 0000000000000..a66cbcf4fef52 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.noop; + +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; + +/** + * No-op implementation of Tracer + */ +public class NoopTracer implements Tracer { + + /** + * No-op Tracer instance + */ + public static final Tracer INSTANCE = new NoopTracer(); + + private NoopTracer() {} + + @Override + public SpanScope startSpan(String spanName) { + return SpanScope.NO_OP; + } + + @Override + public void close() { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/package-info.java new file mode 100644 index 0000000000000..b9d83e7bc7275 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains No-op implementations + */ +package org.opensearch.telemetry.tracing.noop;
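The no-op variants above give call sites a safe default when telemetry is switched off; a minimal wiring sketch (the telemetryEnabled flag is hypothetical, and the DefaultTracer constructor shape is inferred from the tests below):

    Tracer tracer = telemetryEnabled
        ? new DefaultTracer(tracingTelemetry, tracerContextStorage)
        : NoopTracer.INSTANCE;
    // NoopTracer.startSpan returns SpanScope.NO_OP, so callers never need null checks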
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/package-info.java new file mode 100644 index 0000000000000..66898bd58b753 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains tracing related classes + */ +package org.opensearch.telemetry.tracing; diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java new file mode 100644 index 0000000000000..eea6b77ce6e1e --- /dev/null +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.function.Consumer; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class DefaultSpanScopeTests extends OpenSearchTestCase { + + @SuppressWarnings("unchecked") + public void testClose() { + Span mockSpan = mock(Span.class); + Consumer<Span> mockConsumer = mock(Consumer.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, mockConsumer); + defaultSpanScope.close(); + + verify(mockConsumer).accept(mockSpan); + } + + public void testAddSpanAttributeString() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + defaultSpanScope.addSpanAttribute("key", "value"); + + verify(mockSpan).addAttribute("key", "value"); + } + + public void testAddSpanAttributeLong() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + defaultSpanScope.addSpanAttribute("key", 1L); + + verify(mockSpan).addAttribute("key", 1L); + } + + public void testAddSpanAttributeDouble() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + defaultSpanScope.addSpanAttribute("key", 1.0); + + verify(mockSpan).addAttribute("key", 1.0); + } + + public void testAddSpanAttributeBoolean() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + defaultSpanScope.addSpanAttribute("key", true); + + verify(mockSpan).addAttribute("key", true); + } + + public void testAddEvent() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + defaultSpanScope.addSpanEvent("eventName"); + + verify(mockSpan).addEvent("eventName"); + } + + public void testSetError() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + Exception ex = new Exception("error"); + defaultSpanScope.setError(ex); + + verify(mockSpan).setError(ex); + } + +}
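As the tests above verify, DefaultSpanScope simply delegates attribute and event calls to the wrapped Span and hands the span to a Consumer on close. A minimal sketch, assuming that constructor shape (Span, Consumer<Span>):

    Span span = tracingTelemetry.createSpan("operation", parentSpan);
    try (SpanScope scope = new DefaultSpanScope(span, Span::endSpan)) {
        scope.addSpanAttribute("attempt", 1L); // forwarded to span.addAttribute("attempt", 1L)
    }
    // close() invokes the consumer; here it simply ends the span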
diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java new file mode 100644 index 0000000000000..2b7a379b0051a --- /dev/null +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.junit.Assert; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; + +public class DefaultTracerTests extends OpenSearchTestCase { + + private TracingTelemetry mockTracingTelemetry; + private TracerContextStorage<String, Span> mockTracerContextStorage; + private Span mockSpan; + private Span mockParentSpan; + + @Override + public void setUp() throws Exception { + super.setUp(); + setupMocks(); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + public void testCreateSpan() { + DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); + + defaultTracer.startSpan("span_name"); + + Assert.assertEquals("span_name", defaultTracer.getCurrentSpan().getSpanName()); + } + + public void testEndSpanByClosingScope() { + DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); + try (SpanScope spanScope = defaultTracer.startSpan("span_name")) { + verify(mockTracerContextStorage).put(TracerContextStorage.CURRENT_SPAN, mockSpan); + } + verify(mockTracerContextStorage).put(TracerContextStorage.CURRENT_SPAN, mockParentSpan); + } + + public void testClose() throws IOException { + Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); + + defaultTracer.close(); + + verify(mockTracingTelemetry).close(); + } + + @SuppressWarnings("unchecked") + private void setupMocks() { + mockTracingTelemetry = mock(TracingTelemetry.class); + mockSpan = mock(Span.class); + mockParentSpan = mock(Span.class); + mockTracerContextStorage = mock(TracerContextStorage.class); + when(mockSpan.getSpanName()).thenReturn("span_name"); + when(mockSpan.getSpanId()).thenReturn("span_id"); + when(mockSpan.getTraceId()).thenReturn("trace_id"); + when(mockSpan.getParentSpan()).thenReturn(mockParentSpan); + when(mockParentSpan.getSpanId()).thenReturn("parent_span_id"); + when(mockParentSpan.getTraceId()).thenReturn("trace_id"); + when(mockTracerContextStorage.get(TracerContextStorage.CURRENT_SPAN)).thenReturn(mockParentSpan, mockSpan); + when(mockTracingTelemetry.createSpan("span_name", mockParentSpan)).thenReturn(mockSpan); + } +} diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java index d7df53e7a0cf5..46891b279ba43 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java @@ -36,6 +36,7 @@ import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; +import com.fasterxml.jackson.core.StreamReadFeature; import com.fasterxml.jackson.dataformat.cbor.CBORFactory; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; @@ -75,6 +76,7 @@ public static XContentBuilder contentBuilder() throws IOException { cborFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); cborFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); cborFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); +
cborFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); cborXContent = new CborXContent(); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java index 8ff8e7730b189..e6c27e4cf3eef 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java @@ -37,7 +37,7 @@ import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; - +import com.fasterxml.jackson.core.StreamReadFeature; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -78,6 +78,7 @@ public static XContentBuilder contentBuilder() throws IOException { jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); jsonFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + jsonFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); jsonXContent = new JsonXContent(); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java index e0a39df1589a2..eb968556de8c9 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java @@ -36,6 +36,7 @@ import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; +import com.fasterxml.jackson.core.StreamReadFeature; import com.fasterxml.jackson.dataformat.smile.SmileFactory; import com.fasterxml.jackson.dataformat.smile.SmileGenerator; import org.opensearch.core.xcontent.DeprecationHandler; @@ -77,6 +78,7 @@ public static XContentBuilder contentBuilder() throws IOException { smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); smileFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); smileFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + smileFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); smileXContent = new SmileXContent(); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java index 1e73cb2bd9c5e..bb4fa9a09d448 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java @@ -35,6 +35,7 @@ import com.fasterxml.jackson.core.JsonEncoding; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; +import com.fasterxml.jackson.core.StreamReadFeature; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -70,6 +71,7 @@ public static XContentBuilder 
contentBuilder() throws IOException { yamlFactory = new YAMLFactory(); yamlFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); yamlFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + yamlFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); yamlXContent = new YamlXContent(); } diff --git a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java index 784dad8cea49f..1f0680b27796d 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java @@ -132,7 +132,7 @@ public void testFailureInConditionalProcessor() { for (int k = 0; k < nodeCount; k++) { List<IngestStats.ProcessorStat> stats = r.getNodes().get(k).getIngestStats().getProcessorStats().get(pipelineId); for (IngestStats.ProcessorStat st : stats) { - assertThat(st.getStats().getIngestCurrent(), greaterThanOrEqualTo(0L)); + assertThat(st.getStats().getCurrent(), greaterThanOrEqualTo(0L)); } } } diff --git a/modules/lang-expression/licenses/lucene-expressions-9.7.0-snapshot-204acc3.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index f70c327ab5f6b..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c31671978777d97c026b13decfbef1d7eeed4410 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.7.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..ecf696b4b3b83 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.7.0.jar.sha1 @@ -0,0 +1 @@ +297e1cfade4ef71466cc9d4f361d81807c8dc4c8 \ No newline at end of file
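The four x-content hunks above (CBOR, JSON, Smile, YAML) all flip on Jackson's StreamReadFeature.USE_FAST_DOUBLE_PARSER, which switches double/float decoding to the FastDoubleParser-based fast path. A stand-alone sketch of the same switch on a plain JsonFactory (illustrative only, not OpenSearch code):

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.core.StreamReadFeature;

    JsonFactory factory = new JsonFactory();
    factory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true);
    try (JsonParser parser = factory.createParser("[2.2250738585072012e-308]")) {
        parser.nextToken();                     // START_ARRAY
        parser.nextToken();                     // VALUE_NUMBER_FLOAT
        double value = parser.getDoubleValue(); // decoded via the fast double parser
    }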
diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java new file mode 100644 index 0000000000000..87f3c68d8af76 --- /dev/null +++ b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java @@ -0,0 +1,189 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec; + +import org.opensearch.action.admin.indices.flush.FlushResponse; +import org.opensearch.action.admin.indices.refresh.RefreshResponse; +import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.engine.Segment; +import org.opensearch.index.reindex.BulkByScrollResponse; +import org.opensearch.index.reindex.ReindexAction; +import org.opensearch.index.reindex.ReindexRequestBuilder; +import org.opensearch.index.reindex.ReindexTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static java.util.stream.Collectors.toList; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; + +public class MultiCodecReindexIT extends ReindexTestCase { + + public void testReindexingMultipleCodecs() throws InterruptedException, ExecutionException { + internalCluster().ensureAtLeastNumDataNodes(1); + Map<String, String> codecMap = Map.of( + "best_compression", + "BEST_COMPRESSION", + "zstd_no_dict", + "ZSTD_NO_DICT", + "zstd", + "ZSTD", + "default", + "BEST_SPEED" + ); + + for (Map.Entry<String, String> codec : codecMap.entrySet()) { + assertReindexingWithMultipleCodecs(codec.getKey(), codec.getValue(), codecMap); + } + + } + + private void assertReindexingWithMultipleCodecs(String destCodec, String destCodecMode, Map<String, String> codecMap) + throws ExecutionException, InterruptedException { + + final String index = "test-index" + destCodec; + final String destIndex = "dest-index" + destCodec; + + // creating source index + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", "default") + .put("index.merge.policy.max_merged_segment", "1b") + .build() + ); + ensureGreen(index); + + final int nbDocs = randomIntBetween(2, 5); + + // indexing with all 4 codecs + for (Map.Entry<String, String> codec : codecMap.entrySet()) { + useCodec(index, codec.getKey()); + ingestDocs(index, nbDocs); + } + + assertTrue( + getSegments(index).stream() + .flatMap(s -> s.getAttributes().values().stream()) + .collect(Collectors.toSet()) + .containsAll(codecMap.values()) + ); + + // creating destination index with destination codec + createIndex( + destIndex, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", destCodec) + .build() + ); + + BulkByScrollResponse bulkResponse = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source(index) + .destination(destIndex) + .refresh(true) + .waitForActiveShards(ActiveShardCount.ONE) + .get(); + +
assertEquals(codecMap.size() * nbDocs, bulkResponse.getCreated()); + assertEquals(codecMap.size() * nbDocs, bulkResponse.getTotal()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + assertTrue(getSegments(destIndex).stream().allMatch(segment -> segment.attributes.containsValue(destCodecMode))); + } + + private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { + assertAcked(client().admin().indices().prepareClose(index)); + + assertAcked( + client().admin() + .indices() + .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) + .get() + ); + + assertAcked(client().admin().indices().prepareOpen(index)); + } + + private void flushAndRefreshIndex(String index) { + + // Request is not blocked + for (String blockSetting : Arrays.asList( + SETTING_BLOCKS_READ, + SETTING_BLOCKS_WRITE, + SETTING_READ_ONLY, + SETTING_BLOCKS_METADATA, + SETTING_READ_ONLY_ALLOW_DELETE + )) { + try { + enableIndexBlock(index, blockSetting); + // flush + FlushResponse flushResponse = client().admin().indices().prepareFlush(index).setForce(true).execute().actionGet(); + assertNoFailures(flushResponse); + + // refresh + RefreshResponse refreshResponse = client().admin().indices().prepareRefresh(index).execute().actionGet(); + assertNoFailures(refreshResponse); + } finally { + disableIndexBlock(index, blockSetting); + } + } + } + + private void ingestDocs(String index, int nbDocs) throws InterruptedException { + + indexRandom( + randomBoolean(), + false, + randomBoolean(), + IntStream.range(0, nbDocs) + .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) + .collect(toList()) + ); + flushAndRefreshIndex(index); + } + + private ArrayList<Segment> getSegments(String index) { + + return new ArrayList<>( + client().admin() + .indices() + .segments(new IndicesSegmentsRequest(index)) + .actionGet() + .getIndices() + .get(index) + .getShards() + .get(0) + .getShards()[0].getSegments() + ); + } + +} diff --git a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/50_script_processor.yml b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/50_script_processor.yml index 9b2dc0c41ff31..9d855e8a1861a 100644 --- a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/50_script_processor.yml +++ b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/50_script_processor.yml @@ -39,7 +39,7 @@ teardown: { "script" : { "lang" : "painless", - "source" : "ctx._source['size'] += 10; ctx._source['from'] -= 1; ctx._source['explain'] = !ctx._source['explain']; ctx._source['version'] = !ctx._source['version']; ctx._source['seq_no_primary_term'] = !ctx._source['seq_no_primary_term']; ctx._source['track_scores'] = !ctx._source['track_scores']; ctx._source['track_total_hits'] = 1; ctx._source['min_score'] -= 0.9; ctx._source['terminate_after'] += 2; ctx._source['profile'] = !ctx._source['profile'];" + "source" : "ctx._source['size'] += 10; ctx._source['from'] = ctx._source['from'] <= 0 ?
ctx._source['from'] : ctx._source['from'] - 1 ; ctx._source['explain'] = !ctx._source['explain']; ctx._source['version'] = !ctx._source['version']; ctx._source['seq_no_primary_term'] = !ctx._source['seq_no_primary_term']; ctx._source['track_scores'] = !ctx._source['track_scores']; ctx._source['track_total_hits'] = 1; ctx._source['min_score'] -= 0.9; ctx._source['terminate_after'] += 2; ctx._source['profile'] = !ctx._source['profile'];" } } ] diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index e0a491ab63d7b..83c4db80b7798 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -127,7 +127,6 @@ thirdPartyAudit { 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', - 'com.aayushatharva.brotli4j.encoder.Encoder', 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', // classes are missing diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.93.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.93.Final.jar.sha1 deleted file mode 100644 index 5c5a17a9466f1..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87fac21f4ef95157866b07b48e3c707a2f13c581 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..05b1c2a4d614e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +eec248b26f16e888688e5bb37b7eeda76b78d2f7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.93.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.93.Final.jar.sha1 deleted file mode 100644 index 2b12a111335a2..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -503badb458b6586632be8d1f81aa4e5ab99a80fc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..baa7e25f1ac49 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +c70ef20ca338558147887df60f46341bc47f6900 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.93.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.93.Final.jar.sha1 deleted file mode 100644 index 6719e882e40fe..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36acf0c94d03eb6ecef78a749a32cbb7dc0c57b4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..8c018be2565e5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +9e5404764092c1f6305ad5719078f46ab228d587 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.93.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.93.Final.jar.sha1 deleted file 
mode 100644 index 02423842d6244..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0f1625b43bde13ec057da0d2fe381ded2547a70e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..e73026b412972 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +f651595784d6cca4cbca6a8ad74c48fceed6cea8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.93.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.93.Final.jar.sha1 deleted file mode 100644 index 2324a54dc5735..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1cfc49b91b0d3ddb30c9f7d8467e5d02ae8babdf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..b787338551ede --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +ad4ecf779ebc794cd351f57792f56ea01387b868 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.93.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.93.Final.jar.sha1 deleted file mode 100644 index 54e5b22047812..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -10f7ed9d8e1bfcba416074c70e5388be96116bfc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..b08e85ba7adf8 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +cd9121ce24d6d3f2898946d04b0ef3ec548b00b4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.93.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.93.Final.jar.sha1 deleted file mode 100644 index c795762c2d7f1..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3860e99075f9e078364ed38f6d6fc8825b7a168a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..4c9e4dda2b852 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +e96f649e8e9dcb29a1f8e95328b99c9eb6cf76c2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.93.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.93.Final.jar.sha1 deleted file mode 100644 index 687cade3c7b3a..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a0894915c8027ce83b4d6a811c4e765955efd15 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..ed7760b8e15d1 
--- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +ec783a737f96991a87b1d5794e2f9eb2024d708a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.93.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.93.Final.jar.sha1 deleted file mode 100644 index 0f0acb2eccddf..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f4fb8b4c8da539091f43abcbb9f0389e48807eea \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..43bc960a347a1 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +3fa5f9d04b6b782d869d6e0657d896eeadca5866 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0-snapshot-204acc3.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index b5c59c8f5ed8c..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e31f7c161ad2b7652b1ac9100b4fa67d52be86eb \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..0ed030926ab93 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0.jar.sha1 @@ -0,0 +1 @@ +94293b169fb8572f440a5a4a523320ecf9778ffe \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0-snapshot-204acc3.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index e3f1d7afdc72a..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed13aaa5843c76ba8bb9fcd1cac6de8855ea7f1c \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..ddd67276606a5 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0.jar.sha1 @@ -0,0 +1 @@ +2df800a38b64867b8dcd61fc2cd986114e4a80cb \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0-snapshot-204acc3.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 12210d41b4e6f..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8673a2b86d3868e9ef2745d1d3fa9ad5a63e84f \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..0cd68af98e724 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0.jar.sha1 @@ -0,0 +1 @@ +a01e8153f34d72e8c8c0180c1dea5b10f677dd3a \ No newline at end of file diff --git 
a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0-snapshot-204acc3.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 307d90ca834b6..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f7a585d5f62033f19a5ec79b66a74cc24c9e9d10 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..c7b4d2dc6da75 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0.jar.sha1 @@ -0,0 +1 @@ +b7d47d54683b0b1e09b271c32d1b7d3eb1990f49 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0-snapshot-204acc3.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index b4e595a07ffda..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -40ba622c3aa91a0a8f2747617fef8f59bf212345 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..8df7245044171 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0.jar.sha1 @@ -0,0 +1 @@ +5e68b9816e6cff8ee15f5b350cf2ffa54f9828b7 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0-snapshot-204acc3.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 9b6dcc21465fa..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e08432a3db0bde29f425812896bd695c5477b05f \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..974e4202f5ffb --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0.jar.sha1 @@ -0,0 +1 @@ +d23b1f05b471e05d0d6068b3ece7c8c65672eae7 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0-snapshot-204acc3.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index b70b4e8d6c35b..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a44008e83286060f0265a32f9a9b586e86ec03b1 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..dce408a7d40ef --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0.jar.sha1 @@ -0,0 +1 @@ +dfb4313f3c68d337310522840d7144c1605d084a \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpclient-4.5.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpclient-4.5.13.jar.sha1 
deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/plugins/discovery-azure-classic/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpclient-4.5.14.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.16.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 0000000000000..172110694b5bd --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpclient-4.5.13.jar.sha1 b/plugins/discovery-ec2/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/plugins/discovery-ec2/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpclient-4.5.14.jar.sha1 b/plugins/discovery-ec2/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/discovery-ec2/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.16.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 0000000000000..172110694b5bd --- /dev/null +++ b/plugins/discovery-ec2/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpclient-4.5.13.jar.sha1 b/plugins/discovery-gce/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/plugins/discovery-gce/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpclient-4.5.14.jar.sha1 b/plugins/discovery-gce/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/discovery-gce/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 
b/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.16.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 0000000000000..172110694b5bd --- /dev/null +++ b/plugins/discovery-gce/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java new file mode 100644 index 0000000000000..f8736f05dadfc --- /dev/null +++ b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java @@ -0,0 +1,312 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation; + +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.junit.Assert; +import org.opensearch.client.Request; +import org.opensearch.client.RequestOptions; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.client.RestClient; +import org.opensearch.client.WarningsHandler; +import org.opensearch.common.Strings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.IndexSettings; +import org.opensearch.rest.RestStatus; +import org.opensearch.test.rest.OpenSearchRestTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Correlation Vectors Engine e2e tests + */ +public class CorrelationVectorsEngineIT extends OpenSearchRestTestCase { + + private static final int DIMENSION = 4; + private static final String PROPERTIES_FIELD_NAME = "properties"; + private static final String TYPE_FIELD_NAME = "type"; + private static final String CORRELATION_VECTOR_TYPE = "correlation_vector"; + private static final String DIMENSION_FIELD_NAME = "dimension"; + private static final int M = 16; + private static final int EF_CONSTRUCTION = 128; + private static final String INDEX_NAME = "test-index-1"; + private static final Float[][] TEST_VECTORS = new Float[][] { + { 1.0f, 1.0f, 1.0f, 1.0f }, + { 2.0f, 2.0f, 2.0f, 2.0f }, + { 3.0f, 3.0f, 3.0f, 3.0f } }; + private static final float[][] TEST_QUERY_VECTORS = new float[][] { + { 1.0f, 1.0f, 1.0f, 1.0f }, + { 2.0f, 2.0f, 2.0f, 2.0f }, + { 3.0f, 3.0f, 3.0f, 3.0f } }; + private static final Map<VectorSimilarityFunction, Function<Float, Float>> VECTOR_SIMILARITY_TO_SCORE = Map.of( + VectorSimilarityFunction.EUCLIDEAN, + (similarity) -> 1 / (1 + similarity), + VectorSimilarityFunction.DOT_PRODUCT, + (similarity) -> (1 + similarity) / 2, + VectorSimilarityFunction.COSINE, + (similarity) -> (1 + similarity) / 2 + ); + + /** + * Tests the e2e storage and query layer of events-correlation-engine + * @throws IOException IOException + */ + @SuppressWarnings("unchecked") + public void testQuery() throws IOException { + String textField = "text-field"; + String luceneField = "lucene-field"; + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject(PROPERTIES_FIELD_NAME) + .startObject(textField) + .field(TYPE_FIELD_NAME, "text") + .endObject() + .startObject(luceneField) + .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) + .field(DIMENSION_FIELD_NAME, DIMENSION) + .startObject("correlation_ctx") + .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) + .startObject("parameters") + .field("m", M) + .field("ef_construction", EF_CONSTRUCTION) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + + String mapping = Strings.toString(builder); + createTestIndexWithMappingJson(client(), INDEX_NAME, mapping, getCorrelationDefaultIndexSettings()); + + for (int idx = 0; idx < TEST_VECTORS.length; ++idx) { + addCorrelationDoc( + INDEX_NAME, + String.valueOf(idx + 1), + List.of(textField, luceneField), + List.of(java.util.UUID.randomUUID().toString(), TEST_VECTORS[idx]) + ); + } + refreshAllIndices(); + Assert.assertEquals(TEST_VECTORS.length, getDocCount(INDEX_NAME)); + + int k = 2; + for (float[] query : TEST_QUERY_VECTORS) { + + String correlationQuery = "{\n" + + " \"query\": {\n" + + " \"correlation\": {\n" + + " \"lucene-field\": {\n" + + " \"vector\": \n" + + Arrays.toString(query) + + " ,\n" + + " \"k\": 2,\n" + + " \"boost\": 1\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + + Response response = searchCorrelationIndex(INDEX_NAME, correlationQuery, k); + Map<String, Object> responseBody = entityAsMap(response); + Assert.assertEquals(2, ((List<Object>) ((Map<String, Object>) responseBody.get("hits")).get("hits")).size()); + @SuppressWarnings("unchecked") + double actualScore1 = Double.parseDouble( + ((List<Map<String, Object>>) ((Map<String, Object>) responseBody.get("hits")).get("hits")).get(0).get("_score").toString() + ); + @SuppressWarnings("unchecked") + double actualScore2 = Double.parseDouble( + ((List<Map<String, Object>>) ((Map<String, Object>) responseBody.get("hits")).get("hits")).get(1).get("_score").toString() + ); + @SuppressWarnings("unchecked") + List<Float> hit1 = ((Map<String, List<Double>>) ((List<Map<String, Object>>) ((Map<String, Object>) responseBody.get("hits")) + .get("hits")).get(0).get("_source")).get(luceneField).stream().map(Double::floatValue).collect(Collectors.toList()); + float[] resultVector1 = new float[hit1.size()]; + for (int i = 0; i < hit1.size(); ++i) { + resultVector1[i] = hit1.get(i); + } + + @SuppressWarnings("unchecked") + List<Float> hit2 = ((Map<String, List<Double>>) ((List<Map<String, Object>>) ((Map<String, Object>) responseBody.get("hits")) + .get("hits")).get(1).get("_source")).get(luceneField).stream().map(Double::floatValue).collect(Collectors.toList()); + float[] resultVector2 = new float[hit2.size()]; + for (int i = 0; i < hit2.size(); ++i) { + resultVector2[i] = hit2.get(i); + } + + double rawScore1 = VectorSimilarityFunction.EUCLIDEAN.compare(resultVector1, query); + Assert.assertEquals(rawScore1, actualScore1, 0.0001); + double rawScore2 = VectorSimilarityFunction.EUCLIDEAN.compare(resultVector2, query); + Assert.assertEquals(rawScore2, actualScore2, 0.0001); + } + }
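For intuition on the assertions above: Lucene's VectorSimilarityFunction.EUCLIDEAN.compare returns 1 / (1 + squaredDistance), so the raw scores computed in the test line up with the _score values the correlation query reports. A tiny worked check using the test fixtures (illustrative, not part of the PR):

    float[] a = { 1.0f, 1.0f, 1.0f, 1.0f };
    float[] b = { 2.0f, 2.0f, 2.0f, 2.0f };
    // squared distance = 4 * (2 - 1)^2 = 4, so the score is 1 / (1 + 4) = 0.2
    float score = VectorSimilarityFunction.EUCLIDEAN.compare(a, b);
    assert Math.abs(score - 0.2f) < 1e-6f;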
\"correlation\": {\n" + + " \"lucene-field\": {\n" + + " \"vector\": \n" + + Arrays.toString(query) + + " ,\n" + + " \"k\": 2,\n" + + " \"boost\": 1\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + Exception ex = assertThrows(ResponseException.class, () -> { searchCorrelationIndex(INDEX_NAME, correlationQuery, 2); }); + String expectedMessage = String.format(Locale.ROOT, "no such index [%s]", INDEX_NAME); + String actualMessage = ex.getMessage(); + Assert.assertTrue(actualMessage.contains(expectedMessage)); + } + + /** + * unhappy test for the e2e storage and query layer of events-correlation-engine with wrong mapping + */ + public void testQueryWithWrongMapping() throws IOException { + String textField = "text-field"; + String luceneField = "lucene-field"; + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject(PROPERTIES_FIELD_NAME) + .startObject(textField) + .field(TYPE_FIELD_NAME, "text") + .endObject() + .startObject(luceneField) + .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) + .field("test", DIMENSION) + .startObject("correlation_ctx") + .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) + .startObject("parameters") + .field("m", M) + .field("ef_construction", EF_CONSTRUCTION) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + + String mapping = Strings.toString(builder); + Exception ex = assertThrows(ResponseException.class, () -> { + createTestIndexWithMappingJson(client(), INDEX_NAME, mapping, getCorrelationDefaultIndexSettings()); + }); + + String expectedMessage = String.format( + Locale.ROOT, + "unknown parameter [test] on mapper [%s] of type [correlation_vector]", + luceneField + ); + String actualMessage = ex.getMessage(); + Assert.assertTrue(actualMessage.contains(expectedMessage)); + } + + private String createTestIndexWithMappingJson(RestClient client, String index, String mapping, Settings settings) throws IOException { + Request request = new Request("PUT", "/" + index); + String entity = "{\"settings\": " + Strings.toString(XContentType.JSON, settings); + if (mapping != null) { + entity = entity + ",\"mappings\" : " + mapping; + } + + entity = entity + "}"; + if (!settings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)) { + expectSoftDeletesWarning(request, index); + } + + request.setJsonEntity(entity); + client.performRequest(request); + return index; + } + + private Settings getCorrelationDefaultIndexSettings() { + return Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).put("index.correlation", true).build(); + } + + private void addCorrelationDoc(String index, String docId, List fieldNames, List vectors) throws IOException { + Request request = new Request("POST", "/" + index + "/_doc/" + docId + "?refresh=true"); + + XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); + for (int i = 0; i < fieldNames.size(); i++) { + builder.field(fieldNames.get(i), vectors.get(i)); + } + builder.endObject(); + + request.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(request); + assertEquals(request.getEndpoint() + ": failed", RestStatus.CREATED, RestStatus.fromCode(response.getStatusLine().getStatusCode())); + } + + private Response searchCorrelationIndex(String index, String correlationQuery, int resultSize) throws IOException { + Request request = new Request("POST", "/" + index + "/_search"); + + request.addParameter("size", Integer.toString(resultSize)); + request.addParameter("explain", 
Boolean.toString(true)); + request.addParameter("search_type", "query_then_fetch"); + request.setJsonEntity(correlationQuery); + + Response response = client().performRequest(request); + Assert.assertEquals("Search failed", RestStatus.OK, restStatus(response)); + return response; + } + + private int getDocCount(String index) throws IOException { + Response response = makeRequest( + client(), + "GET", + String.format(Locale.getDefault(), "/%s/_count", index), + Collections.emptyMap(), + null + ); + Assert.assertEquals(RestStatus.OK, restStatus(response)); + return Integer.parseInt(entityAsMap(response).get("count").toString()); + } + + private Response makeRequest( + RestClient client, + String method, + String endpoint, + Map<String, String> params, + HttpEntity entity, + Header... headers + ) throws IOException { + Request request = new Request(method, endpoint); + RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); + + for (Header header : headers) { + options.addHeader(header.getName(), header.getValue()); + } + request.setOptions(options.build()); + request.addParameters(params); + if (entity != null) { + request.setEntity(entity); + } + return client.performRequest(request); + } + + private RestStatus restStatus(Response response) { + return RestStatus.fromCode(response.getStatusLine().getStatusCode()); + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java index 443a794bd99df..6945f21a0fd7c 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java @@ -23,13 +23,23 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.codec.CodecServiceFactory; +import org.opensearch.index.mapper.Mapper; +import org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecService; +import org.opensearch.plugin.correlation.core.index.mapper.CorrelationVectorFieldMapper; +import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; +import org.opensearch.plugin.correlation.core.index.query.CorrelationQueryBuilder; import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; import org.opensearch.plugin.correlation.rules.resthandler.RestIndexCorrelationRuleAction; import org.opensearch.plugin.correlation.rules.transport.TransportIndexCorrelationRuleAction; import org.opensearch.plugin.correlation.settings.EventsCorrelationSettings; import org.opensearch.plugin.correlation.utils.CorrelationRuleIndices; import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.EnginePlugin; +import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SearchPlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestController; import org.opensearch.rest.RestHandler; @@ -38,13 +48,16 @@ import org.opensearch.watcher.ResourceWatcherService; import java.util.Collection; +import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.Optional;
java.util.function.Supplier; /** * Plugin class for events-correlation-engine */ -public class EventsCorrelationPlugin extends Plugin implements ActionPlugin { +public class EventsCorrelationPlugin extends Plugin implements ActionPlugin, MapperPlugin, SearchPlugin, EnginePlugin { /** * events-correlation-engine base uri @@ -93,6 +106,30 @@ public List<RestHandler> getRestHandlers( return List.of(new RestIndexCorrelationRuleAction()); } + @Override + public Map<String, Mapper.TypeParser> getMappers() { + return Collections.singletonMap(CorrelationVectorFieldMapper.CONTENT_TYPE, new VectorFieldMapper.TypeParser()); + } + + @Override + public Optional<CodecServiceFactory> getCustomCodecServiceFactory(IndexSettings indexSettings) { + if (indexSettings.getValue(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING)) { + return Optional.of(CorrelationCodecService::new); + } + return Optional.empty(); + } + + @Override + public List<QuerySpec<?>> getQueries() { + return Collections.singletonList( + new QuerySpec<>( + CorrelationQueryBuilder.NAME_FIELD.getPreferredName(), + CorrelationQueryBuilder::new, + CorrelationQueryBuilder::parse + ) + ); + } + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return List.of(new ActionPlugin.ActionHandler<>(IndexCorrelationRuleAction.INSTANCE, TransportIndexCorrelationRuleAction.class)); diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java new file mode 100644 index 0000000000000..c7a23dabfd312 --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java @@ -0,0 +1,148 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation.core.index; + +import org.apache.lucene.index.VectorSimilarityFunction; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.mapper.MapperParsingException; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * Defines the vector similarity function and the m and ef_construction hyper-parameter field mappings for the correlation_vector type.
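+ * For example, a correlation_vector mapping may carry an entry such as (values illustrative):
+ * "correlation_ctx": { "similarityFunction": "EUCLIDEAN", "parameters": { "m": 16, "ef_construction": 128 } }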
+ * + * @opensearch.internal + */ +public class CorrelationParamsContext implements ToXContentFragment, Writeable { + + /** + * Vector Similarity Function field + */ + public static final String VECTOR_SIMILARITY_FUNCTION = "similarityFunction"; + /** + * Parameters field to define m and ef_construction + */ + public static final String PARAMETERS = "parameters"; + + private final VectorSimilarityFunction similarityFunction; + private final Map<String, Object> parameters; + + /** + * Parameterized ctor for CorrelationParamsContext + * @param similarityFunction Vector Similarity Function + * @param parameters Parameters to define m and ef_construction + */ + public CorrelationParamsContext(VectorSimilarityFunction similarityFunction, Map<String, Object> parameters) { + this.similarityFunction = similarityFunction; + this.parameters = parameters; + } + + /** + * Parameterized ctor for CorrelationParamsContext + * @param sin StreamInput + * @throws IOException IOException + */ + public CorrelationParamsContext(StreamInput sin) throws IOException { + this.similarityFunction = VectorSimilarityFunction.valueOf(sin.readString()); + if (sin.available() > 0) { + this.parameters = sin.readMap(); + } else { + this.parameters = null; + } + } + + /** + * Parse into CorrelationParamsContext + * @param in Object + * @return CorrelationParamsContext + */ + public static CorrelationParamsContext parse(Object in) { + if (!(in instanceof Map)) { + throw new MapperParsingException("Unable to parse CorrelationParamsContext"); + } + + @SuppressWarnings("unchecked") + Map<String, Object> contextMap = (Map<String, Object>) in; + VectorSimilarityFunction similarityFunction = VectorSimilarityFunction.EUCLIDEAN; + Map<String, Object> parameters = new HashMap<>(); + + if (contextMap.containsKey(VECTOR_SIMILARITY_FUNCTION)) { + Object value = contextMap.get(VECTOR_SIMILARITY_FUNCTION); + + if (value != null && !(value instanceof String)) { + throw new MapperParsingException(String.format(Locale.getDefault(), "%s must be a string", VECTOR_SIMILARITY_FUNCTION)); + } + + try { + similarityFunction = VectorSimilarityFunction.valueOf((String) value); + } catch (IllegalArgumentException ex) { + throw new MapperParsingException(String.format(Locale.getDefault(), "Invalid %s: %s", VECTOR_SIMILARITY_FUNCTION, value)); + } + } + if (contextMap.containsKey(PARAMETERS)) { + Object value = contextMap.get(PARAMETERS); + if (!(value instanceof Map)) { + throw new MapperParsingException("Unable to parse parameters for Correlation context"); + } + + @SuppressWarnings("unchecked") + Map<String, Object> valueMap = (Map<String, Object>) value; + parameters.putAll(valueMap); + } + return new CorrelationParamsContext(similarityFunction, parameters); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(VECTOR_SIMILARITY_FUNCTION, similarityFunction.name()); + if (parameters == null) { + builder.field(PARAMETERS, (String) null); + } else { + builder.startObject(PARAMETERS); + for (Map.Entry<String, Object> parameter : parameters.entrySet()) { + builder.field(parameter.getKey(), parameter.getValue()); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(similarityFunction.name()); + if (this.parameters != null) { + out.writeMap(parameters); + } + } + + /** + * get Vector Similarity Function + * @return Vector Similarity Function + */ + public VectorSimilarityFunction getSimilarityFunction() { + return similarityFunction; + } + + /** + * Get Parameters to
define m and ef_construction + * @return Parameters to define m and ef_construction + */ + public Map<String, Object> getParameters() { + return parameters; + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java new file mode 100644 index 0000000000000..61efd6b9a87ae --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation.core.index; + +import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexableFieldType; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.io.stream.BytesStreamOutput; + +import java.io.IOException; + +/** + * Generic vector field that stores a correlation vector as a field name plus a float-array value. + * + * @opensearch.internal + */ +public class VectorField extends Field { + + /** + * Parameterized ctor for VectorField + * @param name name of the field + * @param value float array value for the field + * @param type type of the field + */ + public VectorField(String name, float[] value, IndexableFieldType type) { + super(name, new BytesRef(), type); + try { + final byte[] floatToByte = floatToByteArray(value); + this.setBytesValue(floatToByte); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + + /** + * converts a float array based vector to a byte array. + * @param input float array + * @return byte array + */ + protected static byte[] floatToByteArray(float[] input) throws IOException { + BytesStreamOutput objectStream = new BytesStreamOutput(); + objectStream.writeFloatArray(input); + return objectStream.bytes().toBytesRef().bytes; + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java new file mode 100644 index 0000000000000..7763b1e42d63e --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation.core.index.codec; + +import org.apache.lucene.codecs.KnnVectorsFormat; +import org.apache.lucene.codecs.perfield.PerFieldKnnVectorsFormat; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.plugin.correlation.core.index.mapper.CorrelationVectorFieldMapper; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.function.BiFunction; +import java.util.function.Supplier; + +/** + * Class defining the hyper-parameters m and ef_construction used to insert and store correlation vectors in an HNSW-graph-based Lucene index.
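+ * Fields that do not set m or ef_construction fall back to the defaults supplied by the concrete subclass
+ * (the Lucene HNSW defaults in the case of PerFieldCorrelationVectorsFormat).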
+ * + * @opensearch.internal + */ +public abstract class BasePerFieldCorrelationVectorsFormat extends PerFieldKnnVectorsFormat { + /** + * the hyper-parameters for constructing HNSW graphs. + * https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html + */ + public static final String METHOD_PARAMETER_M = "m"; + /** + * the hyper-parameters for constructing HNSW graphs. + * https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html + */ + public static final String METHOD_PARAMETER_EF_CONSTRUCTION = "ef_construction"; + + private final Optional mapperService; + private final int defaultMaxConnections; + private final int defaultBeamWidth; + private final Supplier defaultFormatSupplier; + private final BiFunction formatSupplier; + + /** + * Parameterized ctor of BasePerFieldCorrelationVectorsFormat + * @param mapperService mapper service + * @param defaultMaxConnections default m + * @param defaultBeamWidth default ef_construction + * @param defaultFormatSupplier default format supplier + * @param formatSupplier format supplier + */ + public BasePerFieldCorrelationVectorsFormat( + Optional mapperService, + int defaultMaxConnections, + int defaultBeamWidth, + Supplier defaultFormatSupplier, + BiFunction formatSupplier + ) { + this.mapperService = mapperService; + this.defaultMaxConnections = defaultMaxConnections; + this.defaultBeamWidth = defaultBeamWidth; + this.defaultFormatSupplier = defaultFormatSupplier; + this.formatSupplier = formatSupplier; + } + + @Override + public KnnVectorsFormat getKnnVectorsFormatForField(String field) { + if (!isCorrelationVectorFieldType(field)) { + return defaultFormatSupplier.get(); + } + + var type = (CorrelationVectorFieldMapper.CorrelationVectorFieldType) mapperService.orElseThrow( + () -> new IllegalArgumentException( + String.format(Locale.getDefault(), "Cannot read field type for field [%s] because mapper service is not available", field) + ) + ).fieldType(field); + + var params = type.getCorrelationParams().getParameters(); + int maxConnections = getMaxConnections(params); + int beamWidth = getBeamWidth(params); + + return formatSupplier.apply(maxConnections, beamWidth); + } + + private boolean isCorrelationVectorFieldType(final String field) { + return mapperService.isPresent() + && mapperService.get().fieldType(field) instanceof CorrelationVectorFieldMapper.CorrelationVectorFieldType; + } + + private int getMaxConnections(final Map params) { + if (params != null && params.containsKey(METHOD_PARAMETER_M)) { + return (int) params.get(METHOD_PARAMETER_M); + } + return defaultMaxConnections; + } + + private int getBeamWidth(final Map params) { + if (params != null && params.containsKey(METHOD_PARAMETER_EF_CONSTRUCTION)) { + return (int) params.get(METHOD_PARAMETER_EF_CONSTRUCTION); + } + return defaultBeamWidth; + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java new file mode 100644 index 0000000000000..0b70e7ed66f3d --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source 
license. + */ + +package org.opensearch.plugin.correlation.core.index.codec; + +import org.apache.lucene.codecs.Codec; +import org.opensearch.index.codec.CodecService; +import org.opensearch.index.codec.CodecServiceConfig; +import org.opensearch.index.mapper.MapperService; + +/** + * custom Correlation Codec Service + * + * @opensearch.internal + */ +public class CorrelationCodecService extends CodecService { + + private final MapperService mapperService; + + /** + * Parameterized ctor for CorrelationCodecService + * @param codecServiceConfig Generic codec service config + */ + public CorrelationCodecService(CodecServiceConfig codecServiceConfig) { + super(codecServiceConfig.getMapperService(), codecServiceConfig.getLogger()); + mapperService = codecServiceConfig.getMapperService(); + } + + @Override + public Codec codec(String name) { + return CorrelationCodecVersion.current().getCorrelationCodecSupplier().apply(super.codec(name), mapperService); + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java new file mode 100644 index 0000000000000..5e2cb8bfbc03a --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation.core.index.codec; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene95.Lucene95Codec; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.plugin.correlation.core.index.codec.correlation950.CorrelationCodec; +import org.opensearch.plugin.correlation.core.index.codec.correlation950.PerFieldCorrelationVectorsFormat; + +import java.util.Optional; +import java.util.function.BiFunction; +import java.util.function.Supplier; + +/** + * CorrelationCodecVersion enum + * + * @opensearch.internal + */ +public enum CorrelationCodecVersion { + V_9_5_0( + "CorrelationCodec", + new Lucene95Codec(), + new PerFieldCorrelationVectorsFormat(Optional.empty()), + (userCodec, mapperService) -> new CorrelationCodec(userCodec, new PerFieldCorrelationVectorsFormat(Optional.of(mapperService))), + CorrelationCodec::new + ); + + private static final CorrelationCodecVersion CURRENT = V_9_5_0; + private final String codecName; + private final Codec defaultCodecDelegate; + private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat; + private final BiFunction correlationCodecSupplier; + private final Supplier defaultCorrelationCodecSupplier; + + CorrelationCodecVersion( + String codecName, + Codec defaultCodecDelegate, + PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat, + BiFunction correlationCodecSupplier, + Supplier defaultCorrelationCodecSupplier + ) { + this.codecName = codecName; + this.defaultCodecDelegate = defaultCodecDelegate; + this.perFieldCorrelationVectorsFormat = perFieldCorrelationVectorsFormat; + this.correlationCodecSupplier = correlationCodecSupplier; + this.defaultCorrelationCodecSupplier = defaultCorrelationCodecSupplier; + } + + /** + * get codec name + * @return codec name + */ + public String getCodecName() { + return 
codecName; + } + + /** + * get default codec delegate + * @return default codec delegate + */ + public Codec getDefaultCodecDelegate() { + return defaultCodecDelegate; + } + + /** + * get correlation vectors format + * @return correlation vectors format + */ + public PerFieldCorrelationVectorsFormat getPerFieldCorrelationVectorsFormat() { + return perFieldCorrelationVectorsFormat; + } + + /** + * get correlation codec supplier + * @return correlation codec supplier + */ + public BiFunction getCorrelationCodecSupplier() { + return correlationCodecSupplier; + } + + /** + * get default correlation codec supplier + * @return default correlation codec supplier + */ + public Supplier getDefaultCorrelationCodecSupplier() { + return defaultCorrelationCodecSupplier; + } + + /** + * static method to get correlation codec version + * @return correlation codec version + */ + public static final CorrelationCodecVersion current() { + return CURRENT; + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodec.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodec.java new file mode 100644 index 0000000000000..f91ba429fbea9 --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodec.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation.core.index.codec.correlation950; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.FilterCodec; +import org.apache.lucene.codecs.KnnVectorsFormat; +import org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion; + +/** + * Correlation Codec class + * + * @opensearch.internal + */ +public class CorrelationCodec extends FilterCodec { + private static final CorrelationCodecVersion VERSION = CorrelationCodecVersion.V_9_5_0; + private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat; + + /** + * ctor for CorrelationCodec + */ + public CorrelationCodec() { + this(VERSION.getDefaultCodecDelegate(), VERSION.getPerFieldCorrelationVectorsFormat()); + } + + /** + * Parameterized ctor for CorrelationCodec + * @param delegate codec delegate + * @param perFieldCorrelationVectorsFormat correlation vectors format + */ + public CorrelationCodec(Codec delegate, PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat) { + super(VERSION.getCodecName(), delegate); + this.perFieldCorrelationVectorsFormat = perFieldCorrelationVectorsFormat; + } + + @Override + public KnnVectorsFormat knnVectorsFormat() { + return perFieldCorrelationVectorsFormat; + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/PerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/PerFieldCorrelationVectorsFormat.java new file mode 100644 index 0000000000000..f6862ecc17736 --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/PerFieldCorrelationVectorsFormat.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: 
Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation.core.index.codec.correlation950; + +import org.apache.lucene.codecs.lucene95.Lucene95HnswVectorsFormat; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat; + +import java.util.Optional; + +/** + * Class to define the hyper-parameters m and ef_construction for insert and store of correlation vectors into HNSW graphs based lucene index. + */ +public class PerFieldCorrelationVectorsFormat extends BasePerFieldCorrelationVectorsFormat { + + /** + * Parameterized ctor for PerFieldCorrelationVectorsFormat + * @param mapperService mapper service + */ + public PerFieldCorrelationVectorsFormat(final Optional mapperService) { + super( + mapperService, + Lucene95HnswVectorsFormat.DEFAULT_MAX_CONN, + Lucene95HnswVectorsFormat.DEFAULT_BEAM_WIDTH, + Lucene95HnswVectorsFormat::new, + Lucene95HnswVectorsFormat::new + ); + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/package-info.java new file mode 100644 index 0000000000000..b4dad34d2718e --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * custom Lucene9.5 codec package for events-correlation-engine + */ +package org.opensearch.plugin.correlation.core.index.codec.correlation950; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java new file mode 100644 index 0000000000000..862b7cd253f04 --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * custom codec package for events-correlation-engine + */ +package org.opensearch.plugin.correlation.core.index.codec; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java new file mode 100644 index 0000000000000..a1918f3c954d0 --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java @@ -0,0 +1,174 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.correlation.core.index.mapper; + +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.KnnFloatVectorField; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.opensearch.common.Explicit; +import org.opensearch.index.mapper.FieldMapper; +import org.opensearch.index.mapper.ParseContext; +import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; +import org.opensearch.plugin.correlation.core.index.VectorField; + +import java.io.IOException; +import java.util.Locale; +import java.util.Optional; + +import static org.apache.lucene.index.FloatVectorValues.MAX_DIMENSIONS; + +/** + * Field mapper for the correlation vector type + * + * @opensearch.internal + */ +public class CorrelationVectorFieldMapper extends VectorFieldMapper { + + private static final int LUCENE_MAX_DIMENSION = MAX_DIMENSIONS; + + private final FieldType vectorFieldType; + + /** + * Parameterized ctor for CorrelationVectorFieldMapper + * @param input Object containing name of the field, type and other details. + */ + public CorrelationVectorFieldMapper(final CreateLuceneFieldMapperInput input) { + super( + input.getName(), + input.getMappedFieldType(), + input.getMultiFields(), + input.getCopyTo(), + input.getIgnoreMalformed(), + input.isStored(), + input.isHasDocValues() + ); + + this.correlationParams = input.getCorrelationParams(); + final VectorSimilarityFunction vectorSimilarityFunction = this.correlationParams.getSimilarityFunction(); + + final int dimension = input.getMappedFieldType().getDimension(); + if (dimension > LUCENE_MAX_DIMENSION) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Dimension value cannot be greater than [%s] but got [%s] for vector [%s]", + LUCENE_MAX_DIMENSION, + dimension, + input.getName() + ) + ); + } + + this.fieldType = KnnFloatVectorField.createFieldType(dimension, vectorSimilarityFunction); + + if (this.hasDocValues) { + this.vectorFieldType = buildDocValuesFieldType(); + } else { + this.vectorFieldType = null; + } + } + + private static FieldType buildDocValuesFieldType() { + FieldType field = new FieldType(); + field.setDocValuesType(DocValuesType.BINARY); + field.freeze(); + return field; + } + + @Override + protected void parseCreateField(ParseContext context, int dimension) throws IOException { + Optional arrayOptional = getFloatsFromContext(context, dimension); + + if (arrayOptional.isEmpty()) { + return; + } + final float[] array = arrayOptional.get(); + + KnnFloatVectorField point = new KnnFloatVectorField(name(), array, fieldType); + + context.doc().add(point); + if (fieldType.stored()) { + context.doc().add(new StoredField(name(), point.toString())); + } + if (hasDocValues && vectorFieldType != null) { + context.doc().add(new VectorField(name(), array, vectorFieldType)); + } + context.path().remove(); + } + + static class CreateLuceneFieldMapperInput { + String name; + + CorrelationVectorFieldType mappedFieldType; + + FieldMapper.MultiFields multiFields; + + FieldMapper.CopyTo copyTo; + + Explicit ignoreMalformed; + boolean stored; + boolean hasDocValues; + + CorrelationParamsContext correlationParams; + + public CreateLuceneFieldMapperInput( + String name, + CorrelationVectorFieldType mappedFieldType, + FieldMapper.MultiFields multiFields, + FieldMapper.CopyTo copyTo, + Explicit ignoreMalformed, + boolean stored, + boolean hasDocValues, + 
CorrelationParamsContext correlationParams + ) { + this.name = name; + this.mappedFieldType = mappedFieldType; + this.multiFields = multiFields; + this.copyTo = copyTo; + this.ignoreMalformed = ignoreMalformed; + this.stored = stored; + this.hasDocValues = hasDocValues; + this.correlationParams = correlationParams; + } + + public String getName() { + return name; + } + + public CorrelationVectorFieldType getMappedFieldType() { + return mappedFieldType; + } + + public FieldMapper.MultiFields getMultiFields() { + return multiFields; + } + + public FieldMapper.CopyTo getCopyTo() { + return copyTo; + } + + public Explicit getIgnoreMalformed() { + return ignoreMalformed; + } + + public boolean isStored() { + return stored; + } + + public boolean isHasDocValues() { + return hasDocValues; + } + + public CorrelationParamsContext getCorrelationParams() { + return correlationParams; + } + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java new file mode 100644 index 0000000000000..5ac6d92792295 --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java @@ -0,0 +1,399 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation.core.index.mapper; + +import org.apache.lucene.search.FieldExistsQuery; +import org.apache.lucene.search.Query; +import org.opensearch.common.Explicit; +import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.mapper.FieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.Mapper; +import org.opensearch.index.mapper.MapperParsingException; +import org.opensearch.index.mapper.ParametrizedFieldMapper; +import org.opensearch.index.mapper.ParseContext; +import org.opensearch.index.mapper.TextSearchInfo; +import org.opensearch.index.mapper.ValueFetcher; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryShardException; +import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; + +/** + * Parameterized field mapper for Correlation Vector type + * + * @opensearch.internal + */ +public abstract class VectorFieldMapper extends ParametrizedFieldMapper { + + /** + * name of Correlation Vector type + */ + public static final String CONTENT_TYPE = "correlation_vector"; + /** + * dimension of the correlation vectors + */ + public static final String DIMENSION = "dimension"; + /** + * context e.g. 
parameters and vector similarity function of the Correlation Vector type + */ + public static final String CORRELATION_CONTEXT = "correlation_ctx"; + + private static VectorFieldMapper toType(FieldMapper in) { + return (VectorFieldMapper) in; + } + + /** + * definition of VectorFieldMapper.Builder + */ + public static class Builder extends ParametrizedFieldMapper.Builder { + protected Boolean ignoreMalformed; + + protected final Parameter<Boolean> stored = Parameter.boolParam("store", false, m -> toType(m).stored, false); + protected final Parameter<Boolean> hasDocValues = Parameter.boolParam("doc_values", false, m -> toType(m).hasDocValues, true); + protected final Parameter<Integer> dimension = new Parameter<>(DIMENSION, false, () -> -1, (n, c, o) -> { + if (o == null) { + throw new IllegalArgumentException("Dimension cannot be null"); + } + int value; + try { + value = XContentMapValues.nodeIntegerValue(o); + } catch (Exception ex) { + throw new IllegalArgumentException( + String.format(Locale.getDefault(), "Unable to parse [dimension] from provided value [%s] for vector [%s]", o, name) + ); + } + if (value <= 0) { + throw new IllegalArgumentException( + String.format(Locale.getDefault(), "Dimension value must be greater than 0 for vector: %s", name) + ); + } + return value; + }, m -> toType(m).dimension); + + protected final Parameter<CorrelationParamsContext> correlationParamsContext = new Parameter<>( + CORRELATION_CONTEXT, + false, + () -> null, + (n, c, o) -> CorrelationParamsContext.parse(o), + m -> toType(m).correlationParams + ); + + protected final Parameter<Map<String, String>> meta = Parameter.metaParam(); + + /** + * Parameterized ctor for VectorFieldMapper.Builder + * @param name name + */ + public Builder(String name) { + super(name); + } + + @Override + protected List<Parameter<?>> getParameters() { + return Arrays.asList(stored, hasDocValues, dimension, meta, correlationParamsContext); + } + + protected Explicit<Boolean> ignoreMalformed(BuilderContext context) { + if (ignoreMalformed != null) { + return new Explicit<>(ignoreMalformed, true); + } + if (context.indexSettings() != null) { + return new Explicit<>(IGNORE_MALFORMED_SETTING.get(context.indexSettings()), false); + } + return Defaults.IGNORE_MALFORMED; + } + + @Override + public ParametrizedFieldMapper build(BuilderContext context) { + final CorrelationParamsContext correlationParams = correlationParamsContext.getValue(); + final MultiFields multiFieldsBuilder = this.multiFieldsBuilder.build(this, context); + final CopyTo copyToBuilder = copyTo.build(); + final Explicit<Boolean> ignoreMalformed = ignoreMalformed(context); + final Map<String, String> metaValue = meta.getValue(); + + final CorrelationVectorFieldType mappedFieldType = new CorrelationVectorFieldType( + buildFullName(context), + metaValue, + dimension.getValue(), + correlationParams + ); + + CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput createLuceneFieldMapperInput = + new CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput( + name, + mappedFieldType, + multiFieldsBuilder, + copyToBuilder, + ignoreMalformed, + stored.get(), + hasDocValues.get(), + correlationParams + ); + return new CorrelationVectorFieldMapper(createLuceneFieldMapperInput); + } + } + + /** + * definition of VectorFieldMapper.TypeParser + */ + public static class TypeParser implements Mapper.TypeParser { + + /** + * default constructor of VectorFieldMapper.TypeParser + */ + public TypeParser() {} + + @Override + public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext context) throws MapperParsingException { + Builder builder = new VectorFieldMapper.Builder(name);
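+ // parse(...) below populates the parameters declared on Builder (store, doc_values, dimension, correlation_ctx)
+ // from the mapping node; an absent dimension keeps the -1 default and trips the check that follows.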
builder.parse(name, context, node); + + if (builder.dimension.getValue() == -1) { + throw new IllegalArgumentException(String.format(Locale.getDefault(), "Dimension value missing for vector: %s", name)); + } + return builder; + } + } + + /** + * definition of VectorFieldMapper.CorrelationVectorFieldType + */ + public static class CorrelationVectorFieldType extends MappedFieldType { + int dimension; + CorrelationParamsContext correlationParams; + + /** + * Parameterized ctor for VectorFieldMapper.CorrelationVectorFieldType + * @param name name of the field + * @param meta meta of the field + * @param dimension dimension of the field + */ + public CorrelationVectorFieldType(String name, Map<String, String> meta, int dimension) { + this(name, meta, dimension, null); + } + + /** + * Parameterized ctor for VectorFieldMapper.CorrelationVectorFieldType + * @param name name of the field + * @param meta meta of the field + * @param dimension dimension of the field + * @param correlationParams correlation params for the field + */ + public CorrelationVectorFieldType( + String name, + Map<String, String> meta, + int dimension, + CorrelationParamsContext correlationParams + ) { + super(name, false, false, true, TextSearchInfo.NONE, meta); + this.dimension = dimension; + this.correlationParams = correlationParams; + } + + @Override + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String s) { + throw new UnsupportedOperationException("Correlation vectors do not support fields search"); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public Query existsQuery(QueryShardContext context) { + return new FieldExistsQuery(name()); + } + + @Override + public Query termQuery(Object o, QueryShardContext context) { + throw new QueryShardException( + context, + String.format( + Locale.getDefault(), + "Correlation vectors do not support exact searching, use correlation queries instead: [%s]", + name() + ) + ); + } + + /** + * get dimension + * @return dimension + */ + public int getDimension() { + return dimension; + } + + /** + * get correlation params + * @return correlation params + */ + public CorrelationParamsContext getCorrelationParams() { + return correlationParams; + } + } + + protected Explicit<Boolean> ignoreMalformed; + protected boolean stored; + protected boolean hasDocValues; + protected Integer dimension; + protected CorrelationParamsContext correlationParams; + + /** + * Parameterized ctor for VectorFieldMapper + * @param simpleName name of field + * @param mappedFieldType field type of field + * @param multiFields multi fields + * @param copyTo copy to + * @param ignoreMalformed ignore malformed + * @param stored stored field + * @param hasDocValues has doc values + */ + public VectorFieldMapper( + String simpleName, + CorrelationVectorFieldType mappedFieldType, + FieldMapper.MultiFields multiFields, + FieldMapper.CopyTo copyTo, + Explicit<Boolean> ignoreMalformed, + boolean stored, + boolean hasDocValues + ) { + super(simpleName, mappedFieldType, multiFields, copyTo); + this.ignoreMalformed = ignoreMalformed; + this.stored = stored; + this.hasDocValues = hasDocValues; + this.dimension = mappedFieldType.getDimension(); + } + + @Override + protected VectorFieldMapper clone() { + return (VectorFieldMapper) super.clone(); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + protected void parseCreateField(ParseContext parseContext) throws IOException { + parseCreateField(parseContext, fieldType().getDimension()); + } + +
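+ // Template-method hook: the override above resolves the dimension from the mapped field type and delegates
+ // here; CorrelationVectorFieldMapper implements this variant to build the Lucene KnnFloatVectorField
+ // (plus the binary doc-values field when doc_values is enabled).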
protected abstract void parseCreateField(ParseContext parseContext, int dimension) throws IOException; + + Optional getFloatsFromContext(ParseContext context, int dimension) throws IOException { + context.path().add(simpleName()); + + List vector = new ArrayList<>(); + XContentParser.Token token = context.parser().currentToken(); + float value; + if (token == XContentParser.Token.START_ARRAY) { + token = context.parser().nextToken(); + while (token != XContentParser.Token.END_ARRAY) { + value = context.parser().floatValue(); + + if (Float.isNaN(value)) { + throw new IllegalArgumentException("Correlation vector values cannot be NaN"); + } + + if (Float.isInfinite(value)) { + throw new IllegalArgumentException("Correlation vector values cannot be infinity"); + } + vector.add(value); + token = context.parser().nextToken(); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + value = context.parser().floatValue(); + if (Float.isNaN(value)) { + throw new IllegalArgumentException("Correlation vector values cannot be NaN"); + } + + if (Float.isInfinite(value)) { + throw new IllegalArgumentException("Correlation vector values cannot be infinity"); + } + vector.add(value); + context.parser().nextToken(); + } else if (token == XContentParser.Token.VALUE_NULL) { + context.path().remove(); + return Optional.empty(); + } + + if (dimension != vector.size()) { + String errorMessage = String.format( + Locale.ROOT, + "Vector dimension mismatch. Expected: %d, Given: %d", + dimension, + vector.size() + ); + throw new IllegalArgumentException(errorMessage); + } + + float[] array = new float[vector.size()]; + int i = 0; + for (Float f : vector) { + array[i++] = f; + } + return Optional.of(array); + } + + @Override + protected boolean docValuesByDefault() { + return true; + } + + @Override + public ParametrizedFieldMapper.Builder getMergeBuilder() { + return new VectorFieldMapper.Builder(simpleName()).init(this); + } + + @Override + public boolean parsesArrayValue() { + return true; + } + + @Override + public CorrelationVectorFieldType fieldType() { + return (CorrelationVectorFieldType) super.fieldType(); + } + + @Override + protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { + super.doXContentBody(builder, includeDefaults, params); + if (includeDefaults || ignoreMalformed.explicit()) { + builder.field(Names.IGNORE_MALFORMED, ignoreMalformed.value()); + } + } + + /** + * Class for constants used in parent class VectorFieldMapper + */ + public static class Names { + public static final String IGNORE_MALFORMED = "ignore_malformed"; + } + + /** + * Class for constants used in parent class VectorFieldMapper + */ + public static class Defaults { + public static final Explicit IGNORE_MALFORMED = new Explicit<>(false, false); + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java new file mode 100644 index 0000000000000..4fdc622c3d886 --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * correlation field mapper package + */ +package org.opensearch.plugin.correlation.core.index.mapper; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java new file mode 100644 index 0000000000000..cfc0ffdfa81f1 --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * package to wrap Lucene KnnFloatVectorField and KnnFloatVectorQuery for Opensearch events-correlation-engine + */ +package org.opensearch.plugin.correlation.core.index; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java new file mode 100644 index 0000000000000..a71a7874f178e --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java @@ -0,0 +1,330 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation.core.index.query; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.Query; +import org.opensearch.common.ParsingException; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.AbstractQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +/** + * Constructs a query to get correlated events or documents for a particular event or document. + * + * @opensearch.internal + */ +public class CorrelationQueryBuilder extends AbstractQueryBuilder { + + private static final Logger log = LogManager.getLogger(CorrelationQueryBuilder.class); + protected static final ParseField VECTOR_FIELD = new ParseField("vector"); + protected static final ParseField K_FIELD = new ParseField("k"); + protected static final ParseField FILTER_FIELD = new ParseField("filter"); + /** + * max number of neighbors that can be retrieved. 
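+ * The query builder constructor rejects any request with k above this limit.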
+ */ + public static int K_MAX = 10000; + + /** + * name of the query + */ + public static final ParseField NAME_FIELD = new ParseField("correlation"); + + private String fieldName; + private float[] vector; + private int k = 0; + private double boost; + private QueryBuilder filter; + + private CorrelationQueryBuilder() {} + + /** + * parameterized ctor for CorrelationQueryBuilder + * @param fieldName field name for query + * @param vector query vector + * @param k number of nearby neighbors + */ + public CorrelationQueryBuilder(String fieldName, float[] vector, int k) { + this(fieldName, vector, k, null); + } + + /** + * parameterized ctor for CorrelationQueryBuilder + * @param fieldName field name for query + * @param vector query vector + * @param k number of nearby neighbors + * @param filter optional filter query + */ + public CorrelationQueryBuilder(String fieldName, float[] vector, int k, QueryBuilder filter) { + if (Strings.isNullOrEmpty(fieldName)) { + throw new IllegalArgumentException( + String.format(Locale.getDefault(), "[%s] requires fieldName", NAME_FIELD.getPreferredName()) + ); + } + if (vector == null) { + throw new IllegalArgumentException( + String.format(Locale.getDefault(), "[%s] requires query vector", NAME_FIELD.getPreferredName()) + ); + } + if (vector.length == 0) { + throw new IllegalArgumentException( + String.format(Locale.getDefault(), "[%s] query vector is empty", NAME_FIELD.getPreferredName()) + ); + } + if (k <= 0) { + throw new IllegalArgumentException(String.format(Locale.getDefault(), "[%s] requires k > 0", NAME_FIELD.getPreferredName())); + } + if (k > K_MAX) { + throw new IllegalArgumentException( + String.format(Locale.getDefault(), "[%s] requires k <= %d", NAME_FIELD.getPreferredName(), K_MAX) + ); + } + + this.fieldName = fieldName; + this.vector = vector; + this.k = k; + this.filter = filter; + } + + /** + * parameterized ctor for CorrelationQueryBuilder + * @param sin StreamInput + * @throws IOException IOException + */ + public CorrelationQueryBuilder(StreamInput sin) throws IOException { + super(sin); + this.fieldName = sin.readString(); + this.vector = sin.readFloatArray(); + this.k = sin.readInt(); + this.filter = sin.readOptionalNamedWriteable(QueryBuilder.class); + } + + private static float[] objectsToFloats(List<Object> objs) { + float[] vector = new float[objs.size()]; + for (int i = 0; i < objs.size(); ++i) { + vector[i] = ((Number) objs.get(i)).floatValue(); + } + return vector; + } + + /** + * parse into CorrelationQueryBuilder + * @param xcp XContentParser + * @return CorrelationQueryBuilder + * @throws IOException IOException + */ + public static CorrelationQueryBuilder parse(XContentParser xcp) throws IOException { + String fieldName = null; + List<Object> vector = null; + float boost = AbstractQueryBuilder.DEFAULT_BOOST; + + int k = 0; + QueryBuilder filter = null; + String queryName = null; + String currentFieldName = null; + XContentParser.Token token; + while ((token = xcp.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = xcp.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + throwParsingExceptionOnMultipleFields(NAME_FIELD.getPreferredName(), xcp.getTokenLocation(), fieldName, currentFieldName); + fieldName = currentFieldName; + while ((token = xcp.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = xcp.currentName(); + } else if (token.isValue() || token == XContentParser.Token.START_ARRAY) { + if (VECTOR_FIELD.match(currentFieldName,
xcp.getDeprecationHandler())) { + vector = xcp.list(); + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) { + boost = xcp.floatValue(); + } else if (K_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) { + k = (Integer) NumberFieldMapper.NumberType.INTEGER.parse(xcp.objectBytes(), false); + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) { + queryName = xcp.text(); + } else { + throw new ParsingException( + xcp.getTokenLocation(), + "[" + NAME_FIELD.getPreferredName() + "] query does not support [" + currentFieldName + "]" + ); + } + } else if (token == XContentParser.Token.START_OBJECT) { + String tokenName = xcp.currentName(); + if (FILTER_FIELD.getPreferredName().equals(tokenName)) { + filter = parseInnerQueryBuilder(xcp); + } else { + throw new ParsingException( + xcp.getTokenLocation(), + "[" + NAME_FIELD.getPreferredName() + "] unknown token [" + token + "]" + ); + } + } else { + throw new ParsingException( + xcp.getTokenLocation(), + "[" + NAME_FIELD.getPreferredName() + "] unknown token [" + token + "] after [" + currentFieldName + "]" + ); + } + } + } else { + throwParsingExceptionOnMultipleFields(NAME_FIELD.getPreferredName(), xcp.getTokenLocation(), fieldName, xcp.currentName()); + fieldName = xcp.currentName(); + vector = xcp.list(); + } + } + + assert vector != null; + CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(fieldName, objectsToFloats(vector), k, filter); + correlationQueryBuilder.queryName(queryName); + correlationQueryBuilder.boost(boost); + return correlationQueryBuilder; + } + + public void setFieldName(String fieldName) { + this.fieldName = fieldName; + } + + /** + * get field name + * @return field name + */ + public String fieldName() { + return fieldName; + } + + public void setVector(float[] vector) { + this.vector = vector; + } + + /** + * get query vector + * @return query vector + */ + public Object vector() { + return vector; + } + + public void setK(int k) { + this.k = k; + } + + /** + * get number of nearby neighbors + * @return number of nearby neighbors + */ + public int getK() { + return k; + } + + public void setBoost(double boost) { + this.boost = boost; + } + + /** + * get boost + * @return boost + */ + public double getBoost() { + return boost; + } + + public void setFilter(QueryBuilder filter) { + this.filter = filter; + } + + /** + * get optional filter + * @return optional filter + */ + public QueryBuilder getFilter() { + return filter; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeFloatArray(vector); + out.writeInt(k); + out.writeOptionalNamedWriteable(filter); + } + + @Override + public void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(fieldName); + + builder.field(VECTOR_FIELD.getPreferredName(), vector); + builder.field(K_FIELD.getPreferredName(), k); + if (filter != null) { + builder.field(FILTER_FIELD.getPreferredName(), filter); + } + printBoostAndQueryName(builder); + builder.endObject(); + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + MappedFieldType mappedFieldType = context.fieldMapper(fieldName); + + if (!(mappedFieldType instanceof VectorFieldMapper.CorrelationVectorFieldType)) { + throw new IllegalArgumentException(String.format(Locale.getDefault(), "Field '%s' is not knn_vector type.", this.fieldName)); + } + + 
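+ // Safe cast after the instanceof check above; the dimension comparison below mirrors the index-time
+ // validation so query vectors and indexed vectors always agree in length.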
VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = + (VectorFieldMapper.CorrelationVectorFieldType) mappedFieldType; + int fieldDimension = correlationVectorFieldType.getDimension(); + + if (fieldDimension != vector.length) { + throw new IllegalArgumentException( + String.format( + Locale.getDefault(), + "Query vector has invalid dimension: %d. Dimension should be: %d", + vector.length, + fieldDimension + ) + ); + } + + String indexName = context.index().getName(); + CorrelationQueryFactory.CreateQueryRequest createQueryRequest = new CorrelationQueryFactory.CreateQueryRequest( + indexName, + this.fieldName, + this.vector, + this.k, + this.filter, + context + ); + return CorrelationQueryFactory.create(createQueryRequest); + } + + @Override + protected boolean doEquals(CorrelationQueryBuilder other) { + return Objects.equals(fieldName, other.fieldName) && Arrays.equals(vector, other.vector) && Objects.equals(k, other.k); + } + + @Override + protected int doHashCode() { + return Objects.hash(fieldName, vector, k); + } + + @Override + public String getWriteableName() { + return NAME_FIELD.getPreferredName(); + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java new file mode 100644 index 0000000000000..d5db299bfa3a5 --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java @@ -0,0 +1,142 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation.core.index.query; + +import org.apache.lucene.search.KnnFloatVectorQuery; +import org.apache.lucene.search.Query; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.Optional; + +/** + * CorrelationQueryFactory util class is used to construct a Lucene KnnFloatVectorQuery. + * + * @opensearch.internal + */ +public class CorrelationQueryFactory { + + /** + * static method which takes input params to construct a Lucene KnnFloatVectorQuery. + * @param createQueryRequest object parameter containing inputs for constructing Lucene KnnFloatVectorQuery. + * @return generic Lucene Query object + */ + public static Query create(CreateQueryRequest createQueryRequest) { + final String indexName = createQueryRequest.getIndexName(); + final String fieldName = createQueryRequest.getFieldName(); + final int k = createQueryRequest.getK(); + final float[] vector = createQueryRequest.getVector(); + + if (createQueryRequest.getFilter().isPresent()) { + final QueryShardContext context = createQueryRequest.getContext() + .orElseThrow(() -> new RuntimeException("Shard context cannot be null")); + + try { + final Query filterQuery = createQueryRequest.getFilter().get().toQuery(context); + return new KnnFloatVectorQuery(fieldName, vector, k, filterQuery); + } catch (IOException ex) { + throw new RuntimeException("Cannot create knn query with filter", ex); + } + } + return new KnnFloatVectorQuery(fieldName, vector, k); + } + + /** + * class containing params to construct a Lucene KnnFloatVectorQuery. 
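+ * The filter and shard context may be null, so the getters expose them as Optionals.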
+ * + * @opensearch.internal + */ + public static class CreateQueryRequest { + private String indexName; + + private String fieldName; + + private float[] vector; + + private int k; + + private QueryBuilder filter; + + private QueryShardContext context; + + /** + * Parameterized ctor for CreateQueryRequest + * @param indexName index name + * @param fieldName field name + * @param vector query vector + * @param k number of nearby neighbors + * @param filter additional filter query + * @param context QueryShardContext + */ + public CreateQueryRequest( + String indexName, + String fieldName, + float[] vector, + int k, + QueryBuilder filter, + QueryShardContext context + ) { + this.indexName = indexName; + this.fieldName = fieldName; + this.vector = vector; + this.k = k; + this.filter = filter; + this.context = context; + } + + /** + * get index name + * @return get index name + */ + public String getIndexName() { + return indexName; + } + + /** + * get field name + * @return get field name + */ + public String getFieldName() { + return fieldName; + } + + /** + * get vector + * @return get vector + */ + public float[] getVector() { + return vector; + } + + /** + * get number of nearby neighbors + * @return number of nearby neighbors + */ + public int getK() { + return k; + } + + /** + * get optional filter query + * @return get optional filter query + */ + public Optional getFilter() { + return Optional.ofNullable(filter); + } + + /** + * get optional query shard context + * @return get optional query shard context + */ + public Optional getContext() { + return Optional.ofNullable(context); + } + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java new file mode 100644 index 0000000000000..2cf5db786a60f --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+/**
+ * correlation query builder package
+ */
+package org.opensearch.plugin.correlation.core.index.query;
diff --git a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec
new file mode 100644
index 0000000000000..598a3b6af73c2
--- /dev/null
+++ b/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec
@@ -0,0 +1 @@
+org.opensearch.plugin.correlation.core.index.codec.correlation950.CorrelationCodec
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java
new file mode 100644
index 0000000000000..7b9d4e31d7e3a
--- /dev/null
+++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java
@@ -0,0 +1,170 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.correlation.core.index;
+
+import org.apache.lucene.index.VectorSimilarityFunction;
+import org.junit.Assert;
+import org.opensearch.common.bytes.BytesReference;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.index.mapper.MapperParsingException;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.opensearch.plugin.correlation.core.index.CorrelationParamsContext.PARAMETERS;
+import static org.opensearch.plugin.correlation.core.index.CorrelationParamsContext.VECTOR_SIMILARITY_FUNCTION;
+
+/**
+ * Unit tests for CorrelationParamsContext
+ */
+public class CorrelationParamsContextTests extends OpenSearchTestCase {
+
+    /**
+     * Test reading from and writing to streams
+     */
+    public void testStreams() throws IOException {
+        int efConstruction = 321;
+        int m = 12;
+
+        Map<String, Object> parameters = new HashMap<>();
+        parameters.put("m", m);
+        parameters.put("ef_construction", efConstruction);
+
+        CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters);
+
+        BytesStreamOutput streamOutput = new BytesStreamOutput();
+        context.writeTo(streamOutput);
+
+        CorrelationParamsContext copy = new CorrelationParamsContext(streamOutput.bytes().streamInput());
+        Assert.assertEquals(context.getSimilarityFunction(), copy.getSimilarityFunction());
+        Assert.assertEquals(context.getParameters(), copy.getParameters());
+    }
+
+    /**
+     * test get vector similarity function
+     */
+    public void testVectorSimilarityFunction() {
+        int efConstruction = 321;
+        int m = 12;
+
+        Map<String, Object> parameters = new HashMap<>();
+        parameters.put("m", m);
+        parameters.put("ef_construction", efConstruction);
+
+        CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters);
+        Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, context.getSimilarityFunction());
+    }
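+
+    // Editorial sketch (assumed JSON shape, inferred from testParse_Valid below and the
+    // mapper tests later in this PR): the context built above corresponds to
+    // { "similarityFunction": "EUCLIDEAN", "parameters": { "m": 12, "ef_construction": 321 } }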
+
+    /**
+     * test get parameters
+     */
+    public void testParameters() {
+        int efConstruction = 321;
+        int m = 12;
+
+        Map<String, Object> parameters = new HashMap<>();
+        parameters.put("m", m);
+        parameters.put("ef_construction", efConstruction);
+
+        CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters);
+        Assert.assertEquals(parameters, context.getParameters());
+    }
+
+    /**
+     * test parse method with invalid input
+     * @throws IOException IOException
+     */
+    public void testParse_Invalid() throws IOException {
+        // Invalid input type
+        Integer invalidIn = 12;
+        expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(invalidIn));
+
+        // Invalid vector similarity function
+        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
+            .startObject()
+            .field(CorrelationParamsContext.VECTOR_SIMILARITY_FUNCTION, 0)
+            .endObject();
+
+        final Map<String, Object> in2 = xContentBuilderToMap(xContentBuilder);
+        expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(in2));
+
+        // Invalid parameters
+        xContentBuilder = XContentFactory.jsonBuilder().startObject().field(PARAMETERS, 0).endObject();
+
+        final Map<String, Object> in4 = xContentBuilderToMap(xContentBuilder);
+        expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(in4));
+    }
+
+    /**
+     * test parse with null parameters
+     * @throws IOException IOException
+     */
+    public void testParse_NullParameters() throws IOException {
+        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
+            .startObject()
+            .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN)
+            .field(PARAMETERS, (String) null)
+            .endObject();
+        Map<String, Object> in = xContentBuilderToMap(xContentBuilder);
+        Assert.assertThrows(MapperParsingException.class, () -> { CorrelationParamsContext.parse(in); });
+    }
+
+    /**
+     * test parse method
+     * @throws IOException IOException
+     */
+    public void testParse_Valid() throws IOException {
+        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
+            .startObject()
+            .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN)
+            .startObject(PARAMETERS)
+            .field("m", 2)
+            .field("ef_construction", 128)
+            .endObject()
+            .endObject();
+
+        Map<String, Object> in = xContentBuilderToMap(xContentBuilder);
+        CorrelationParamsContext context = CorrelationParamsContext.parse(in);
+        Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, context.getSimilarityFunction());
+        Assert.assertEquals(Map.of("m", 2, "ef_construction", 128), context.getParameters());
+    }
+
+    /**
+     * test toXContent method
+     * @throws IOException IOException
+     */
+    public void testToXContent() throws IOException {
+        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
+            .startObject()
+            .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN)
+            .startObject(PARAMETERS)
+            .field("m", 2)
+            .field("ef_construction", 128)
+            .endObject()
+            .endObject();
+
+        Map<String, Object> in = xContentBuilderToMap(xContentBuilder);
+        CorrelationParamsContext context = CorrelationParamsContext.parse(in);
+        XContentBuilder builder = XContentFactory.jsonBuilder();
+        builder = context.toXContent(builder, ToXContent.EMPTY_PARAMS);
+
+        Map<String, Object> out = xContentBuilderToMap(builder);
+        Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN.name(), out.get(VECTOR_SIMILARITY_FUNCTION));
+    }
+
+    private Map<String, Object> xContentBuilderToMap(XContentBuilder xContentBuilder) {
+        return XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2();
+    }
+}
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java
new file mode 100644
index 0000000000000..00cec228de720
--- /dev/null
+++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java
@@ -0,0 +1,83 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.correlation.core.index;
+
+import org.apache.lucene.document.FieldType;
+import org.junit.Assert;
+import org.opensearch.ExceptionsHelper;
+import org.opensearch.OpenSearchException;
+import org.opensearch.common.Randomness;
+import org.opensearch.common.io.stream.BytesStreamInput;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Random;
+
+/**
+ * Unit tests for VectorField
+ */
+public class VectorFieldTests extends OpenSearchTestCase {
+
+    private final Random random = Randomness.get();
+
+    /**
+     * test VectorField ctor
+     */
+    public void testVectorField_ctor() {
+        VectorField field = new VectorField("test-field", new float[] { 1.0f, 1.0f }, new FieldType());
+        Assert.assertEquals("test-field", field.name());
+    }
+
+    /**
+     * test float vector to array serializer
+     * @throws IOException IOException
+     */
+    public void testVectorAsArraySerializer() throws IOException {
+        final float[] vector = getArrayOfRandomFloats(20);
+
+        final BytesStreamOutput objectStream = new BytesStreamOutput();
+        objectStream.writeFloatArray(vector);
+        final byte[] serializedVector = objectStream.bytes().toBytesRef().bytes;
+
+        final byte[] actualSerializedVector = VectorField.floatToByteArray(vector);
+
+        Assert.assertNotNull(actualSerializedVector);
+        Assert.assertArrayEquals(serializedVector, actualSerializedVector);
+
+        final float[] actualDeserializedVector = byteToFloatArray(actualSerializedVector);
+        Assert.assertNotNull(actualDeserializedVector);
+        Assert.assertArrayEquals(vector, actualDeserializedVector, 0.1f);
+    }
+
+    /**
+     * test byte array to float vector failures
+     */
+    public void testByteToFloatArrayFailures() {
+        final byte[] serializedVector = "test-dummy".getBytes(StandardCharsets.UTF_8);
+        expectThrows(OpenSearchException.class, () -> { byteToFloatArray(serializedVector); });
+    }
+
+    private float[] getArrayOfRandomFloats(int length) {
+        float[] vector = new float[length];
+        for (int i = 0; i < length; ++i) {
+            vector[i] = random.nextFloat();
+        }
+        return vector;
+    }
+
+    private static float[] byteToFloatArray(byte[] byteStream) {
+        try (BytesStreamInput objectStream = new BytesStreamInput(byteStream)) {
+            return objectStream.readFloatArray();
+        } catch (IOException ex) {
+            throw ExceptionsHelper.convertToOpenSearchException(ex);
+        }
+    }
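+
+    // Editorial sketch (not part of the original change): the round trip exercised above is,
+    // in effect,
+    //   byte[] bytes = VectorField.floatToByteArray(new float[] { 0.5f, 1.5f });
+    //   float[] back = byteToFloatArray(bytes); // private helper defined just above
+    // i.e. floatToByteArray is asserted to match the StreamOutput#writeFloatArray wire format.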
+}
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java
new file mode 100644
index 0000000000000..ac859773f6350
--- /dev/null
+++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java
@@ -0,0 +1,120 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.correlation.core.index.codec.correlation950;
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.KnnFloatVectorField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.VectorSimilarityFunction;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.tests.index.RandomIndexWriter;
+import org.opensearch.index.mapper.MapperService;
+import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext;
+import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper;
+import org.opensearch.plugin.correlation.core.index.query.CorrelationQueryFactory;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.spy;
+import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_EF_CONSTRUCTION;
+import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_M;
+import static org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion.V_9_5_0;
+
+/**
+ * Unit tests for custom correlation codec
+ */
+public class CorrelationCodecTests extends OpenSearchTestCase {
+
+    private static final String FIELD_NAME_ONE = "test_vector_one";
+    private static final String FIELD_NAME_TWO = "test_vector_two";
+
+    /**
+     * test correlation vector index
+     * @throws Exception Exception
+     */
+    public void testCorrelationVectorIndex() throws Exception {
+        Function<MapperService, PerFieldCorrelationVectorsFormat> perFieldCorrelationVectorsProvider =
+            mapperService -> new PerFieldCorrelationVectorsFormat(Optional.of(mapperService));
+        Function<PerFieldCorrelationVectorsFormat, Codec> correlationCodecProvider = (correlationVectorsFormat -> new CorrelationCodec(
+            V_9_5_0.getDefaultCodecDelegate(),
+            correlationVectorsFormat
+        ));
+        testCorrelationVectorIndex(correlationCodecProvider, perFieldCorrelationVectorsProvider);
+    }
+
+    private void testCorrelationVectorIndex(
+        final Function<PerFieldCorrelationVectorsFormat, Codec> codecProvider,
+        final Function<MapperService, PerFieldCorrelationVectorsFormat> perFieldCorrelationVectorsProvider
+    ) throws Exception {
+        final MapperService mapperService = mock(MapperService.class);
+        final CorrelationParamsContext correlationParamsContext = new CorrelationParamsContext(
+            VectorSimilarityFunction.EUCLIDEAN,
+            Map.of(METHOD_PARAMETER_M, 16, METHOD_PARAMETER_EF_CONSTRUCTION, 256)
+        );
+
+        final VectorFieldMapper.CorrelationVectorFieldType mappedFieldType1 = new VectorFieldMapper.CorrelationVectorFieldType(
+            FIELD_NAME_ONE,
+            Map.of(),
+            3,
+            correlationParamsContext
+        );
+        final VectorFieldMapper.CorrelationVectorFieldType mappedFieldType2 = new
VectorFieldMapper.CorrelationVectorFieldType( + FIELD_NAME_TWO, + Map.of(), + 2, + correlationParamsContext + ); + when(mapperService.fieldType(eq(FIELD_NAME_ONE))).thenReturn(mappedFieldType1); + when(mapperService.fieldType(eq(FIELD_NAME_TWO))).thenReturn(mappedFieldType2); + + var perFieldCorrelationVectorsFormatSpy = spy(perFieldCorrelationVectorsProvider.apply(mapperService)); + final Codec codec = codecProvider.apply(perFieldCorrelationVectorsFormatSpy); + + Directory dir = newFSDirectory(createTempDir()); + IndexWriterConfig iwc = newIndexWriterConfig(); + iwc.setMergeScheduler(new SerialMergeScheduler()); + iwc.setCodec(codec); + + final FieldType luceneFieldType = KnnFloatVectorField.createFieldType(3, VectorSimilarityFunction.EUCLIDEAN); + float[] array = { 1.0f, 3.0f, 4.0f }; + KnnFloatVectorField vectorField = new KnnFloatVectorField(FIELD_NAME_ONE, array, luceneFieldType); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + doc.add(vectorField); + writer.addDocument(doc); + writer.commit(); + IndexReader reader = writer.getReader(); + writer.close(); + + verify(perFieldCorrelationVectorsFormatSpy).getKnnVectorsFormatForField(eq(FIELD_NAME_ONE)); + + IndexSearcher searcher = new IndexSearcher(reader); + Query query = CorrelationQueryFactory.create( + new CorrelationQueryFactory.CreateQueryRequest("dummy", FIELD_NAME_ONE, new float[] { 1.0f, 0.0f, 0.0f }, 1, null, null) + ); + + assertEquals(1, searcher.count(query)); + + reader.close(); + dir.close(); + } +} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java new file mode 100644 index 0000000000000..82c2cc1a07aaf --- /dev/null +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java @@ -0,0 +1,309 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.correlation.core.index.mapper; + +import org.apache.lucene.document.KnnFloatVectorField; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.search.FieldExistsQuery; +import org.junit.Assert; +import org.mockito.Mockito; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Explicit; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.settings.IndexScopedSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.ContentPath; +import org.opensearch.index.mapper.FieldMapper; +import org.opensearch.index.mapper.Mapper; +import org.opensearch.index.mapper.MapperParsingException; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.ParseContext; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryShardException; +import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Unit tests for correlation vector field mapper + */ +public class CorrelationVectorFieldMapperTests extends OpenSearchTestCase { + + private static final String CORRELATION_VECTOR_TYPE = "correlation_vector"; + private static final String DIMENSION_FIELD_NAME = "dimension"; + private static final String TYPE_FIELD_NAME = "type"; + + /** + * test builder construction from parse of correlation params context + * @throws IOException IOException + */ + public void testBuilder_parse_fromCorrelationParamsContext() throws IOException { + String fieldName = "test-field-name"; + String indexName = "test-index-name"; + Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build(); + + VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser(); + + int efConstruction = 321; + int m = 12; + int dimension = 10; + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() + .startObject() + .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) + .field(DIMENSION_FIELD_NAME, dimension) + .startObject("correlation_ctx") + .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) + .startObject("parameters") + .field("m", m) + .field("ef_construction", efConstruction) + .endObject() + .endObject() + .endObject(); + + VectorFieldMapper.Builder builder = (VectorFieldMapper.Builder) typeParser.parse( + fieldName, + XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(), + buildParserContext(indexName, settings) + ); + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(settings, new ContentPath()); + builder.build(builderContext); + + Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, builder.correlationParamsContext.getValue().getSimilarityFunction()); + Assert.assertEquals(321, 
builder.correlationParamsContext.getValue().getParameters().get("ef_construction")); + + XContentBuilder xContentBuilderEmptyParams = XContentFactory.jsonBuilder() + .startObject() + .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) + .field(DIMENSION_FIELD_NAME, dimension) + .startObject("correlation_ctx") + .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) + .endObject() + .endObject(); + + VectorFieldMapper.Builder builderEmptyParams = (VectorFieldMapper.Builder) typeParser.parse( + fieldName, + XContentHelper.convertToMap(BytesReference.bytes(xContentBuilderEmptyParams), true, xContentBuilderEmptyParams.contentType()) + .v2(), + buildParserContext(indexName, settings) + ); + + Assert.assertEquals( + VectorSimilarityFunction.EUCLIDEAN, + builderEmptyParams.correlationParamsContext.getValue().getSimilarityFunction() + ); + Assert.assertTrue(builderEmptyParams.correlationParamsContext.getValue().getParameters().isEmpty()); + } + + /** + * test type parser construction throw error for invalid dimension of correlation vectors + * @throws IOException IOException + */ + public void testTypeParser_parse_fromCorrelationParamsContext_InvalidDimension() throws IOException { + String fieldName = "test-field-name"; + String indexName = "test-index-name"; + Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build(); + + VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser(); + + int efConstruction = 321; + int m = 12; + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() + .startObject() + .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) + .field(DIMENSION_FIELD_NAME, 2000) + .startObject("correlation_ctx") + .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) + .startObject("parameters") + .field("m", m) + .field("ef_construction", efConstruction) + .endObject() + .endObject() + .endObject(); + + VectorFieldMapper.Builder builder = (VectorFieldMapper.Builder) typeParser.parse( + fieldName, + XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(), + buildParserContext(indexName, settings) + ); + + expectThrows(IllegalArgumentException.class, () -> builder.build(new Mapper.BuilderContext(settings, new ContentPath()))); + } + + /** + * test type parser construction error for invalid vector similarity function + * @throws IOException IOException + */ + public void testTypeParser_parse_fromCorrelationParamsContext_InvalidVectorSimilarityFunction() throws IOException { + String fieldName = "test-field-name"; + String indexName = "test-index-name"; + Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build(); + + VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser(); + + int efConstruction = 321; + int m = 12; + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() + .startObject() + .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) + .field(DIMENSION_FIELD_NAME, 2000) + .startObject("correlation_ctx") + .field("similarityFunction", "invalid") + .startObject("parameters") + .field("m", m) + .field("ef_construction", efConstruction) + .endObject() + .endObject() + .endObject(); + + expectThrows( + MapperParsingException.class, + () -> typeParser.parse( + fieldName, + XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(), + buildParserContext(indexName, settings) + ) + ); + } + + /** + * test parseCreateField in CorrelationVectorFieldMapper + 
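* (Editorial note) The mapper is expected to add the parsed vector to the Lucene document as a
+     * single KnnFloatVectorField, roughly the hypothetical call
+     * {@code document.add(new KnnFloatVectorField(fieldName, vector))}; the assertions below
+     * verify exactly that.
+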
* @throws IOException IOException
+     */
+    public void testCorrelationVectorFieldMapper_parseCreateField() throws IOException {
+        String fieldName = "test-field-name";
+        int dimension = 10;
+        float[] testVector = createInitializedFloatArray(dimension, 1.0f);
+        CorrelationParamsContext correlationParamsContext = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, Map.of());
+
+        VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = new VectorFieldMapper.CorrelationVectorFieldType(
+            fieldName,
+            Map.of(),
+            dimension,
+            correlationParamsContext
+        );
+
+        CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput input = new CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput(
+            fieldName,
+            correlationVectorFieldType,
+            FieldMapper.MultiFields.empty(),
+            FieldMapper.CopyTo.empty(),
+            new Explicit<>(true, true),
+            false,
+            false,
+            correlationParamsContext
+        );
+
+        ParseContext.Document document = new ParseContext.Document();
+        ContentPath contentPath = new ContentPath();
+        ParseContext parseContext = mock(ParseContext.class);
+        when(parseContext.doc()).thenReturn(document);
+        when(parseContext.path()).thenReturn(contentPath);
+
+        CorrelationVectorFieldMapper correlationVectorFieldMapper = Mockito.spy(new CorrelationVectorFieldMapper(input));
+        doReturn(Optional.of(testVector)).when(correlationVectorFieldMapper).getFloatsFromContext(parseContext, dimension);
+
+        correlationVectorFieldMapper.parseCreateField(parseContext, dimension);
+
+        List<IndexableField> fields = document.getFields();
+        assertEquals(1, fields.size());
+        IndexableField field = fields.get(0);
+
+        Assert.assertTrue(field instanceof KnnFloatVectorField);
+        KnnFloatVectorField knnFloatVectorField = (KnnFloatVectorField) field;
+        Assert.assertArrayEquals(testVector, knnFloatVectorField.vectorValue(), 0.001f);
+    }
+
+    /**
+     * test CorrelationVectorFieldType subclass
+     */
+    public void testCorrelationVectorFieldType() {
+        String fieldName = "test-field-name";
+        int dimension = 10;
+        QueryShardContext context = mock(QueryShardContext.class);
+        SearchLookup searchLookup = mock(SearchLookup.class);
+
+        VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = new VectorFieldMapper.CorrelationVectorFieldType(
+            fieldName,
+            Map.of(),
+            dimension
+        );
+        Assert.assertThrows(QueryShardException.class, () -> { correlationVectorFieldType.termQuery(new Object(), context); });
+        Assert.assertThrows(
+            UnsupportedOperationException.class,
+            () -> { correlationVectorFieldType.valueFetcher(context, searchLookup, ""); }
+        );
+        Assert.assertTrue(correlationVectorFieldType.existsQuery(context) instanceof FieldExistsQuery);
+        Assert.assertEquals(VectorFieldMapper.CONTENT_TYPE, correlationVectorFieldType.typeName());
+    }
+
+    /**
+     * test constants in VectorFieldMapper
+     */
+    public void testVectorFieldMapperConstants() {
+        Assert.assertNotNull(VectorFieldMapper.Defaults.IGNORE_MALFORMED);
+        Assert.assertNotNull(VectorFieldMapper.Names.IGNORE_MALFORMED);
+    }
+
+    private IndexMetadata buildIndexMetaData(String index, Settings settings) {
+        return IndexMetadata.builder(index)
+            .settings(settings)
+            .numberOfShards(1)
+            .numberOfReplicas(0)
+            .version(7)
+            .mappingVersion(0)
+            .settingsVersion(0)
+            .aliasesVersion(0)
+            .creationDate(0)
+            .build();
+    }
+
+    private Mapper.TypeParser.ParserContext buildParserContext(String index, Settings settings) {
+        IndexSettings indexSettings = new IndexSettings(
+            buildIndexMetaData(index, settings),
+            Settings.EMPTY,
+            new IndexScopedSettings(Settings.EMPTY, new
HashSet<>(IndexScopedSettings.BUILT_IN_INDEX_SETTINGS)) + ); + + MapperService mapperService = mock(MapperService.class); + when(mapperService.getIndexSettings()).thenReturn(indexSettings); + + return new Mapper.TypeParser.ParserContext( + null, + mapperService, + type -> new VectorFieldMapper.TypeParser(), + Version.CURRENT, + null, + null, + null + ); + } + + private static float[] createInitializedFloatArray(int dimension, float value) { + float[] array = new float[dimension]; + Arrays.fill(array, value); + return array; + } +} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java new file mode 100644 index 0000000000000..fd3c7220aad74 --- /dev/null +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java @@ -0,0 +1,268 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.correlation.core.index.query; + +import org.apache.lucene.search.KnnFloatVectorQuery; +import org.junit.Assert; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterModule; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.Index; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; +import org.opensearch.plugins.SearchPlugin; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Optional; + +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Unit tests for Correlation Query Builder + */ +public class CorrelationQueryBuilderTests extends OpenSearchTestCase { + + private static final String FIELD_NAME = "myvector"; + private static final int K = 1; + private static final TermQueryBuilder TERM_QUERY = QueryBuilders.termQuery("field", "value"); + private static final float[] QUERY_VECTOR = new float[] { 1.0f, 2.0f, 3.0f, 4.0f }; + + /** + * test invalid number of nearby neighbors + */ + public void testInvalidK() { + float[] queryVector = { 1.0f, 1.0f }; + + expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, -K)); + expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, 0)); + 
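// Editorial note: taken together, these cases pin k to the inclusive range
+        // [1, CorrelationQueryBuilder.K_MAX]; the statement below exercises the upper bound.
+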
expectThrows(
+            IllegalArgumentException.class,
+            () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, CorrelationQueryBuilder.K_MAX + 1)
+        );
+    }
+
+    /**
+     * test empty vector scenario
+     */
+    public void testEmptyVector() {
+        final float[] queryVector = null;
+        expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, 1));
+        final float[] queryVector1 = new float[] {};
+        expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector1, 1));
+    }
+
+    /**
+     * test serde with xcontent
+     * @throws IOException IOException
+     */
+    public void testFromXContent() throws IOException {
+        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K);
+        XContentBuilder builder = XContentFactory.jsonBuilder();
+        builder.startObject();
+        builder.startObject(correlationQueryBuilder.fieldName());
+        builder.field(CorrelationQueryBuilder.VECTOR_FIELD.getPreferredName(), correlationQueryBuilder.vector());
+        builder.field(CorrelationQueryBuilder.K_FIELD.getPreferredName(), correlationQueryBuilder.getK());
+        builder.endObject();
+        builder.endObject();
+        XContentParser contentParser = createParser(builder);
+        contentParser.nextToken();
+        CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser);
+        Assert.assertEquals(actualBuilder, correlationQueryBuilder);
+    }
+
+    /**
+     * test serde with xcontent from a raw JSON string
+     * @throws IOException IOException
+     */
+    public void testFromXContentFromString() throws IOException {
+        String correlationQuery = "{\n"
+            + "  \"myvector\" : {\n"
+            + "    \"vector\" : [\n"
+            + "      1.0,\n"
+            + "      2.0,\n"
+            + "      3.0,\n"
+            + "      4.0\n"
+            + "    ],\n"
+            + "    \"k\" : 1,\n"
+            + "    \"boost\" : 1.0\n"
+            + "  }\n"
+            + "}";
+        XContentParser contentParser = createParser(JsonXContent.jsonXContent, correlationQuery);
+        contentParser.nextToken();
+        CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser);
+        Assert.assertEquals(correlationQuery.replace("\n", "").replace(" ", ""), Strings.toString(XContentType.JSON, actualBuilder));
+    }
+
+    /**
+     * test serde with xcontent with filters
+     * @throws IOException IOException
+     */
+    public void testFromXContentWithFilters() throws IOException {
+        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, TERM_QUERY);
+        XContentBuilder builder = XContentFactory.jsonBuilder();
+        builder.startObject();
+        builder.startObject(correlationQueryBuilder.fieldName());
+        builder.field(CorrelationQueryBuilder.VECTOR_FIELD.getPreferredName(), correlationQueryBuilder.vector());
+        builder.field(CorrelationQueryBuilder.K_FIELD.getPreferredName(), correlationQueryBuilder.getK());
+        builder.field(CorrelationQueryBuilder.FILTER_FIELD.getPreferredName(), correlationQueryBuilder.getFilter());
+        builder.endObject();
+        builder.endObject();
+        XContentParser contentParser = createParser(builder);
+        contentParser.nextToken();
+        CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser);
+        Assert.assertEquals(actualBuilder, correlationQueryBuilder);
+    }
+
+    /**
+     * test conversion to KnnFloatVectorQuery logic
+     * @throws IOException IOException
+     */
+    public void testDoToQuery() throws IOException {
+        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K);
+        Index dummyIndex = new Index("dummy", "dummy");
+        QueryShardContext mockQueryShardContext = mock(QueryShardContext.class);
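+        // Editorial note: the mocked field type below reports dimension 4, matching
+        // QUERY_VECTOR.length, so doToQuery should build the KNN query rather than throw.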
+        VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class);
+        when(mockQueryShardContext.index()).thenReturn(dummyIndex);
+        when(mockCorrVectorField.getDimension()).thenReturn(4);
+        when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField);
+        KnnFloatVectorQuery query = (KnnFloatVectorQuery) correlationQueryBuilder.doToQuery(mockQueryShardContext);
+        Assert.assertEquals(FIELD_NAME, query.getField());
+        Assert.assertArrayEquals(QUERY_VECTOR, query.getTargetCopy(), 0.1f);
+        Assert.assertEquals(K, query.getK());
+    }
+
+    /**
+     * test conversion to KnnFloatVectorQuery logic with filter
+     * @throws IOException IOException
+     */
+    public void testDoToQueryWithFilter() throws IOException {
+        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, TERM_QUERY);
+        Index dummyIndex = new Index("dummy", "dummy");
+        QueryShardContext mockQueryShardContext = mock(QueryShardContext.class);
+        VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class);
+        when(mockQueryShardContext.index()).thenReturn(dummyIndex);
+        when(mockCorrVectorField.getDimension()).thenReturn(4);
+        when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField);
+        KnnFloatVectorQuery query = (KnnFloatVectorQuery) correlationQueryBuilder.doToQuery(mockQueryShardContext);
+        Assert.assertEquals(FIELD_NAME, query.getField());
+        Assert.assertArrayEquals(QUERY_VECTOR, query.getTargetCopy(), 0.1f);
+        Assert.assertEquals(K, query.getK());
+        Assert.assertEquals(TERM_QUERY.toQuery(mockQueryShardContext), query.getFilter());
+    }
+
+    /**
+     * test conversion to KnnFloatVectorQuery logic failure with invalid dimensions
+     */
+    public void testDoToQueryInvalidDimensions() {
+        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K);
+        Index dummyIndex = new Index("dummy", "dummy");
+        QueryShardContext mockQueryShardContext = mock(QueryShardContext.class);
+        VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class);
+        when(mockQueryShardContext.index()).thenReturn(dummyIndex);
+        when(mockCorrVectorField.getDimension()).thenReturn(400);
+        when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField);
+        expectThrows(IllegalArgumentException.class, () -> correlationQueryBuilder.doToQuery(mockQueryShardContext));
+    }
+
+    /**
+     * test conversion to KnnFloatVectorQuery logic failure with invalid field type
+     */
+    public void testDoToQueryInvalidFieldType() {
+        CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K);
+        Index dummyIndex = new Index("dummy", "dummy");
+        QueryShardContext mockQueryShardContext = mock(QueryShardContext.class);
+        NumberFieldMapper.NumberFieldType mockCorrVectorField = mock(NumberFieldMapper.NumberFieldType.class);
+        when(mockQueryShardContext.index()).thenReturn(dummyIndex);
+        when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField);
+        expectThrows(IllegalArgumentException.class, () -> correlationQueryBuilder.doToQuery(mockQueryShardContext));
+    }
+
+    /**
+     * test serialization of Correlation Query Builder
+     * @throws Exception Exception
+     */
+    public void testSerialization() throws Exception {
+        assertSerialization(Optional.empty());
+        assertSerialization(Optional.of(TERM_QUERY));
+    }
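+
+    // Editorial note: assertSerialization (below) round-trips the builder through a
+    // NamedWriteableRegistry, mirroring the transport-layer serialization a query goes
+    // through between coordinating and data nodes.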
+    private void assertSerialization(final Optional<QueryBuilder> queryBuilderOptional) throws IOException {
+        final CorrelationQueryBuilder builder = queryBuilderOptional.isPresent()
+            ? new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, queryBuilderOptional.get())
+            : new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K);
+
+        try (BytesStreamOutput output = new BytesStreamOutput()) {
+            output.setVersion(Version.CURRENT);
+            output.writeNamedWriteable(builder);
+
+            try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) {
+                in.setVersion(Version.CURRENT);
+                final QueryBuilder deserializedQuery = in.readNamedWriteable(QueryBuilder.class);
+
+                assertNotNull(deserializedQuery);
+                assertTrue(deserializedQuery instanceof CorrelationQueryBuilder);
+                final CorrelationQueryBuilder deserializedKnnQueryBuilder = (CorrelationQueryBuilder) deserializedQuery;
+                assertEquals(FIELD_NAME, deserializedKnnQueryBuilder.fieldName());
+                assertArrayEquals(QUERY_VECTOR, (float[]) deserializedKnnQueryBuilder.vector(), 0.0f);
+                assertEquals(K, deserializedKnnQueryBuilder.getK());
+                if (queryBuilderOptional.isPresent()) {
+                    assertNotNull(deserializedKnnQueryBuilder.getFilter());
+                    assertEquals(queryBuilderOptional.get(), deserializedKnnQueryBuilder.getFilter());
+                } else {
+                    assertNull(deserializedKnnQueryBuilder.getFilter());
+                }
+            }
+        }
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        List<NamedXContentRegistry.Entry> list = ClusterModule.getNamedXWriteables();
+        SearchPlugin.QuerySpec<TermQueryBuilder> spec = new SearchPlugin.QuerySpec<>(
+            TermQueryBuilder.NAME,
+            TermQueryBuilder::new,
+            TermQueryBuilder::fromXContent
+        );
+        list.add(new NamedXContentRegistry.Entry(QueryBuilder.class, spec.getName(), (p, c) -> spec.getParser().fromXContent(p)));
+        NamedXContentRegistry registry = new NamedXContentRegistry(list);
+        return registry;
+    }
+
+    @Override
+    protected NamedWriteableRegistry writableRegistry() {
+        final List<NamedWriteableRegistry.Entry> entries = ClusterModule.getNamedWriteables();
+        entries.add(
+            new NamedWriteableRegistry.Entry(
+                QueryBuilder.class,
+                CorrelationQueryBuilder.NAME_FIELD.getPreferredName(),
+                CorrelationQueryBuilder::new
+            )
+        );
+        entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new));
+        return new NamedWriteableRegistry(entries);
+    }
+}
diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java
new file mode 100644
index 0000000000000..faec6057b04c6
--- /dev/null
+++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.correlation.settings;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.plugin.correlation.EventsCorrelationPlugin;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Unit tests for Correlation Engine settings
+ */
+public class EventsCorrelationSettingsTests extends OpenSearchTestCase {
+
+    private EventsCorrelationPlugin plugin;
+
+    @Before
+    public void setup() {
+        plugin = new EventsCorrelationPlugin();
+    }
+
+    /**
+     * test all plugin settings returned
+     */
+    public void testAllPluginSettingsReturned() {
+        List<Setting<?>> expectedSettings = List.of(
+            EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING,
+            EventsCorrelationSettings.CORRELATION_TIME_WINDOW
+        );
+
+        List<Setting<?>> settings = plugin.getSettings();
+        Assert.assertTrue(settings.containsAll(expectedSettings));
+    }
+
+    /**
+     * test settings get value
+     */
+    public void testSettingsGetValue() {
+        Settings settings = Settings.builder().put("index.correlation", true).build();
+        Assert.assertEquals(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING.get(settings), true);
+        settings = Settings.builder()
+            .put("plugins.security_analytics.correlation_time_window", new TimeValue(10, TimeUnit.MINUTES))
+            .build();
+        Assert.assertEquals(EventsCorrelationSettings.CORRELATION_TIME_WINDOW.get(settings), new TimeValue(10, TimeUnit.MINUTES));
+    }
+}
diff --git a/plugins/identity-shiro/build.gradle b/plugins/identity-shiro/build.gradle
index 22dc21864b620..baa3464d0a98e 100644
--- a/plugins/identity-shiro/build.gradle
+++ b/plugins/identity-shiro/build.gradle
@@ -28,7 +28,7 @@ dependencies {
 implementation 'org.passay:passay:1.6.3'
-implementation "org.bouncycastle:bcprov-jdk15on:${versions.bouncycastle}"
+implementation "org.bouncycastle:bcprov-jdk15to18:${versions.bouncycastle}"
 testImplementation project(path: ':modules:transport-netty4') // for http
 testImplementation project(path: ':plugins:transport-nio') // for http
diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15on-1.70.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk15on-1.70.jar.sha1
deleted file mode 100644
index f5e89c0f5ed45..0000000000000
--- a/plugins/identity-shiro/licenses/bcprov-jdk15on-1.70.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4636a0d01f74acaf28082fb62b317f1080118371
\ No newline at end of file
diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1
new file mode 100644
index 0000000000000..9911bb75f9209
--- /dev/null
+++ b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1
@@ -0,0 +1 @@
+df22e1b6a9f6b218913f5b68dd16641344397fe0
\ No newline at end of file
diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15on-LICENSE.txt b/plugins/identity-shiro/licenses/bcprov-jdk15to18-LICENSE.txt
similarity index 100%
rename from plugins/identity-shiro/licenses/bcprov-jdk15on-LICENSE.txt
rename to plugins/identity-shiro/licenses/bcprov-jdk15to18-LICENSE.txt
diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15on-NOTICE.txt b/plugins/identity-shiro/licenses/bcprov-jdk15to18-NOTICE.txt
similarity index 100%
rename from plugins/identity-shiro/licenses/bcprov-jdk15on-NOTICE.txt
rename to plugins/identity-shiro/licenses/bcprov-jdk15to18-NOTICE.txt
diff --git a/plugins/ingest-attachment/build.gradle
b/plugins/ingest-attachment/build.gradle index 1f3c80909733c..62651216c8144 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -71,9 +71,9 @@ dependencies { api "org.apache.pdfbox:fontbox:${versions.pdfbox}" api "org.apache.pdfbox:jempbox:1.8.17" api "commons-logging:commons-logging:${versions.commonslogging}" - api "org.bouncycastle:bcmail-jdk15on:${versions.bouncycastle}" - api "org.bouncycastle:bcprov-jdk15on:${versions.bouncycastle}" - api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" + api "org.bouncycastle:bcmail-jdk15to18:${versions.bouncycastle}" + api "org.bouncycastle:bcprov-jdk15to18:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk15to18:${versions.bouncycastle}" // OpenOffice api "org.apache.poi:poi-ooxml:${versions.poi}" api "org.apache.poi:poi:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.70.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.70.jar.sha1 deleted file mode 100644 index 672e479eda8d7..0000000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.70.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -08f4aafad90f6cc7f16b9992279828ae848c9e0d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 new file mode 100644 index 0000000000000..e6840a9b02b38 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 @@ -0,0 +1 @@ +b316bcd094e3917b1ece93a6edbab93f8315fb3b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15on-LICENSE.txt b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcmail-jdk15on-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcmail-jdk15to18-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15on-NOTICE.txt b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcmail-jdk15on-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcmail-jdk15to18-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.70.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.70.jar.sha1 deleted file mode 100644 index e348463a21257..0000000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.70.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f81e5af49571a9d5a109a88f239a73ce87055417 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 new file mode 100644 index 0000000000000..9181b1c3ab1b6 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 @@ -0,0 +1 @@ +f16e5252ad7a46d5eaf255231b0a5da307599082 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-LICENSE.txt b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcpkix-jdk15on-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcpkix-jdk15to18-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-NOTICE.txt b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcpkix-jdk15on-NOTICE.txt rename to 
plugins/ingest-attachment/licenses/bcpkix-jdk15to18-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.70.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.70.jar.sha1 deleted file mode 100644 index f5e89c0f5ed45..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.70.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4636a0d01f74acaf28082fb62b317f1080118371 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 new file mode 100644 index 0000000000000..9911bb75f9209 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 @@ -0,0 +1 @@ +df22e1b6a9f6b218913f5b68dd16641344397fe0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15on-LICENSE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk15on-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15on-NOTICE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk15on-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcprov-jdk15to18-NOTICE.txt diff --git a/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy b/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy index 0fa85f6f040f6..4b90f9a21aae4 100644 --- a/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy @@ -35,9 +35,6 @@ grant { // needed to apply additional sandboxing to tika parsing permission java.security.SecurityPermission "createAccessControlContext"; - // TODO: fix PDFBox not to actually install bouncy castle like this - permission java.security.SecurityPermission "putProviderProperty.BC"; - permission java.security.SecurityPermission "insertProvider"; // TODO: fix POI XWPF to not do this: https://bz.apache.org/bugzilla/show_bug.cgi?id=58597 permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed by xmlbeans, as part of POI for MS xml docs diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 48a49af165542..4edb9e0b1913e 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -58,9 +58,9 @@ dependencies { api 'com.azure:azure-storage-blob:12.22.2' api 'org.reactivestreams:reactive-streams:1.0.4' api 'io.projectreactor:reactor-core:3.5.6' - api 'io.projectreactor.netty:reactor-netty:1.1.7' - api 'io.projectreactor.netty:reactor-netty-core:1.1.7' - api 'io.projectreactor.netty:reactor-netty-http:1.1.7' + api 'io.projectreactor.netty:reactor-netty:1.1.8' + api 'io.projectreactor.netty:reactor-netty-core:1.1.8' + api 'io.projectreactor.netty:reactor-netty-http:1.1.8' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.93.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.93.Final.jar.sha1 deleted file mode 100644 index 8ddec4af248f4..0000000000000 --- 
a/plugins/repository-azure/licenses/netty-codec-dns-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1433e4ae1cc56c0809b5b740ff8800d20b33c36 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..670bd4c98a044 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +9180660dc8479e1594b60b02fc27404af0ea43a6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.93.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.93.Final.jar.sha1 deleted file mode 100644 index 02423842d6244..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0f1625b43bde13ec057da0d2fe381ded2547a70e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..e73026b412972 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +f651595784d6cca4cbca6a8ad74c48fceed6cea8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.93.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.93.Final.jar.sha1 deleted file mode 100644 index 3e3f699a3b9a9..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -81ca78969afc60073e47c3b7b361cc3839392c73 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..de2c4d00aef09 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +b9192c7cda295d75f236a13a0b1f5a008f05d516 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.93.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.93.Final.jar.sha1 deleted file mode 100644 index 1ac94e6579c89..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c165c1efe1b9c0cc22546a057b530611a088768b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..a2db8bece8f6f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +26ba9d30b8f7b095155b9ac63378d6d9386d85c3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.93.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.93.Final.jar.sha1 deleted file mode 100644 index 67eb275207963..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5560348ab139dc9ea2dd9c0aa8ddffd0bf1b60f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 new file mode 
100644 index 0000000000000..2fa927b3b77ba --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +25bbe90e10685ce63c32bd0db56574cffffa28de \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.93.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.93.Final.jar.sha1 deleted file mode 100644 index 0f0acb2eccddf..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f4fb8b4c8da539091f43abcbb9f0389e48807eea \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..43bc960a347a1 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +3fa5f9d04b6b782d869d6e0657d896eeadca5866 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-1.1.7.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.1.7.jar.sha1 deleted file mode 100644 index 01a9b1d34d52f..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-1.1.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c16497c29f96ea7b1db538cb0ddde55d9be173fe \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 new file mode 100644 index 0000000000000..6b6bf1903b16c --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 @@ -0,0 +1 @@ +d53a9d7d0395285f4c81664494fcd61477626e32 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.7.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.7.jar.sha1 deleted file mode 100644 index 62ed795cb11e9..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d38bb526a501f52c4476b03730c710a96f8fd35b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 new file mode 100644 index 0000000000000..707631f4dfe0c --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 @@ -0,0 +1 @@ +48999c4ae27cdcee5eaff9dfd150a8b64624f0f5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.7.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.7.jar.sha1 deleted file mode 100644 index 33bf2aabcfc9b..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39d7c0a13afa471b426a30bcf82664496ad34723 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.8.jar.sha1 new file mode 100644 index 0000000000000..5092608c90eba --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.8.jar.sha1 @@ -0,0 +1 @@ +696ea25658295e49906c6aad13fa70acbdeb2359 \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 5d7a4c64ae34a..41c36dffea296 100644 --- a/plugins/repository-gcs/build.gradle +++ 
b/plugins/repository-gcs/build.gradle @@ -52,33 +52,43 @@ versions << [ ] dependencies { - api 'com.google.cloud:google-cloud-storage:1.113.1' + api 'com.google.api:api-common:1.8.1' + api 'com.google.api:gax:2.27.0' + api 'com.google.api:gax-httpjson:0.103.1' + + api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0' + + api 'com.google.api-client:google-api-client:2.2.0' + + api 'com.google.api.grpc:proto-google-common-protos:2.10.0' + api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' + + api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" + api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" + api 'com.google.cloud:google-cloud-core:2.5.10' + api 'com.google.cloud:google-cloud-core-http:2.17.0' + api 'com.google.cloud:google-cloud-storage:1.113.1' + + api 'com.google.code.gson:gson:2.9.0' + runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' + api 'com.google.http-client:google-http-client:1.43.2' + api 'com.google.http-client:google-http-client-appengine:1.43.2' + api 'com.google.http-client:google-http-client-gson:1.43.2' + api 'com.google.http-client:google-http-client-jackson2:1.43.2' + + api 'com.google.oauth-client:google-oauth-client:1.34.1' + api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'com.google.api:api-common:1.8.1' - api 'com.google.api:gax:2.27.0' api 'org.threeten:threetenbp:1.4.4' - api 'com.google.code.gson:gson:2.9.0' - api 'com.google.api.grpc:proto-google-common-protos:2.10.0' - api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' - api 'com.google.cloud:google-cloud-core-http:2.17.0' - api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" - api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" - api 'com.google.oauth-client:google-oauth-client:1.33.3' - api 'com.google.api-client:google-api-client:1.34.0' - api 'com.google.http-client:google-http-client-appengine:1.43.2' - api 'com.google.http-client:google-http-client-jackson2:1.42.3' - api 'com.google.http-client:google-http-client-gson:1.41.4' - api 'com.google.api:gax-httpjson:0.103.1' api 'io.grpc:grpc-context:1.46.0' api 'io.opencensus:opencensus-api:0.31.1' api 'io.opencensus:opencensus-contrib-http-util:0.31.1' - api 'com.google.apis:google-api-services-storage:v1-rev20220608-1.32.1' testImplementation project(':test:fixtures:gcs-fixture') } @@ -167,8 +177,6 @@ thirdPartyAudit { 'org.apache.http.client.methods.HttpRequestBase', 'org.apache.http.config.Registry', 'org.apache.http.config.RegistryBuilder', - 'org.apache.http.config.SocketConfig', - 'org.apache.http.config.SocketConfig$Builder', 'org.apache.http.conn.ClientConnectionManager', 'org.apache.http.conn.ConnectionKeepAliveStrategy', 'org.apache.http.conn.params.ConnManagerParams', diff --git a/plugins/repository-gcs/licenses/google-api-client-1.34.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.34.0.jar.sha1 deleted file mode 100644 index 9be9480435085..0000000000000 --- a/plugins/repository-gcs/licenses/google-api-client-1.34.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af2586412cabeee49c9db6d736e75b745bc467f8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-2.2.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-2.2.0.jar.sha1 new file mode 100644 index 
0000000000000..f9604d6837ca9 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-client-2.2.0.jar.sha1 @@ -0,0 +1 @@ +10e53fd4d987e37190432e896bdaa62e8ea2c628 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 deleted file mode 100644 index 07aaadb2664b2..0000000000000 --- a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74724addc6cecac408dad3a6a26423b7647b3724 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20230617-2.0.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20230617-2.0.0.jar.sha1 new file mode 100644 index 0000000000000..1a1452f773b96 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20230617-2.0.0.jar.sha1 @@ -0,0 +1 @@ +fc3f225b405303fe7cb760d578348b6b07e7ea8b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-gson-1.41.4.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.41.4.jar.sha1 deleted file mode 100644 index 17960a99abea2..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-gson-1.41.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fa665c1c573765dd858bc34931ad747e4ed11efe \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-gson-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.43.2.jar.sha1 new file mode 100644 index 0000000000000..df0374aa27c70 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-gson-1.43.2.jar.sha1 @@ -0,0 +1 @@ +b1c2e3e89804c113dba7b342aa8e0fc2cf3d9378 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1 deleted file mode 100644 index 34d7d49f7b147..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -789cafde696403b429026bf19071caf46d8c8934 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1 new file mode 100644 index 0000000000000..7b606a07651ed --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1 @@ -0,0 +1 @@ +5e52a9967ebd8246fc4cca64df5f03608db5ac6e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 deleted file mode 100644 index f2afaa1bc2dba..0000000000000 --- a/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d445a8649b0de731922b9a3ebf1552b5403611d \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.34.1.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..a8434bd380761 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-1.34.1.jar.sha1 @@ -0,0 +1 @@ +4a4f88c5e13143f882268c98239fb85c3b2c6cb2 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 
7ac54544f7a1b..3c83e535a91d8 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,7 +66,7 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.1' + api 'org.apache.avro:avro:1.11.2' api 'com.google.code.gson:gson:2.10.1' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 deleted file mode 100644 index f03424516b44e..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -81af5d4b9bdaaf4ba41bcb0df5241355ec34c630 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 new file mode 100644 index 0000000000000..ce1a894e0ce6d --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 @@ -0,0 +1 @@ +97e62e8be2b37e849f1bdb5a4f08121d47cc9806 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.93.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.93.Final.jar.sha1 deleted file mode 100644 index a0f7bc4bf65ef..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -408780d8b32ce4510f5408b06efabc2230aaf9b6 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..6766770f61e78 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +2a7df0424eed81818157f22613f36b72487ceb34 \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 2250f2fc88f05..44fd45b265e82 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -439,7 +439,6 @@ thirdPartyAudit { 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', - 'com.aayushatharva.brotli4j.encoder.Encoder', 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', diff --git a/plugins/repository-s3/licenses/httpclient-4.5.13.jar.sha1 b/plugins/repository-s3/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/plugins/repository-s3/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpclient-4.5.14.jar.sha1 b/plugins/repository-s3/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/repository-s3/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff 
--git a/plugins/repository-s3/licenses/httpcore-4.4.16.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 0000000000000..172110694b5bd --- /dev/null +++ b/plugins/repository-s3/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.93.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.93.Final.jar.sha1 deleted file mode 100644 index 5c5a17a9466f1..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87fac21f4ef95157866b07b48e3c707a2f13c581 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..05b1c2a4d614e --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +eec248b26f16e888688e5bb37b7eeda76b78d2f7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.93.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.93.Final.jar.sha1 deleted file mode 100644 index 2b12a111335a2..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -503badb458b6586632be8d1f81aa4e5ab99a80fc \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..baa7e25f1ac49 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +c70ef20ca338558147887df60f46341bc47f6900 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.93.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.93.Final.jar.sha1 deleted file mode 100644 index 6719e882e40fe..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36acf0c94d03eb6ecef78a749a32cbb7dc0c57b4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..8c018be2565e5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +9e5404764092c1f6305ad5719078f46ab228d587 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.93.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.93.Final.jar.sha1 deleted file mode 100644 index 02423842d6244..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0f1625b43bde13ec057da0d2fe381ded2547a70e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..e73026b412972 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +f651595784d6cca4cbca6a8ad74c48fceed6cea8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.93.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.93.Final.jar.sha1 deleted file mode 
100644 index 2324a54dc5735..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1cfc49b91b0d3ddb30c9f7d8467e5d02ae8babdf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..b787338551ede --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +ad4ecf779ebc794cd351f57792f56ea01387b868 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.93.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.93.Final.jar.sha1 deleted file mode 100644 index 54e5b22047812..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -10f7ed9d8e1bfcba416074c70e5388be96116bfc \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..b08e85ba7adf8 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +cd9121ce24d6d3f2898946d04b0ef3ec548b00b4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.93.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.93.Final.jar.sha1 deleted file mode 100644 index c795762c2d7f1..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3860e99075f9e078364ed38f6d6fc8825b7a168a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..4c9e4dda2b852 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +e96f649e8e9dcb29a1f8e95328b99c9eb6cf76c2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.93.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.93.Final.jar.sha1 deleted file mode 100644 index 687cade3c7b3a..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a0894915c8027ce83b4d6a811c4e765955efd15 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..ed7760b8e15d1 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +ec783a737f96991a87b1d5794e2f9eb2024d708a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.93.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.93.Final.jar.sha1 deleted file mode 100644 index 81a180b455cdd..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79c2d95406bc2ef38c08c34efd16039919db80be \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..72a392ea2917d --- /dev/null +++ 
b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +240e36cd5c2ffaf655913f8857f2d58b26394679 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.93.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.93.Final.jar.sha1 deleted file mode 100644 index 0f0acb2eccddf..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f4fb8b4c8da539091f43abcbb9f0389e48807eea \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..43bc960a347a1 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +3fa5f9d04b6b782d869d6e0657d896eeadca5866 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index cf749eeffd903..49ebce77a59ad 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ActionListener; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; import org.opensearch.common.blobstore.BlobContainer; @@ -296,6 +297,35 @@ private static DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs .build(); } + @Override + public void listBlobsByPrefixInSortedOrder( + String blobNamePrefix, + int limit, + BlobNameSortOrder blobNameSortOrder, + ActionListener<List<BlobMetadata>> listener + ) { + // As AWS S3 returns list of keys in Lexicographic order, we don't have to fetch all the keys in order to sort them + // We fetch only keys as per the given limit to optimize the fetch. If provided sort order is not Lexicographic, + // we fall-back to default implementation of fetching all the keys and sorting them. + if (blobNameSortOrder != BlobNameSortOrder.LEXICOGRAPHIC) { + super.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder, listener); + } else { + if (limit < 0) { + throw new IllegalArgumentException("limit should not be a negative value"); + } + String prefix = blobNamePrefix == null ? keyPath : buildKey(blobNamePrefix); + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + List<BlobMetadata> blobs = executeListing(clientReference, listObjectsRequest(prefix, limit), limit).stream() + .flatMap(listing -> listing.contents().stream()) + .map(s3Object -> new PlainBlobMetadata(s3Object.key().substring(keyPath.length()), s3Object.size())) + .collect(Collectors.toList()); + listener.onResponse(blobs.subList(0, Math.min(limit, blobs.size()))); + } catch (final Exception e) { + listener.onFailure(new IOException("Exception when listing blobs by prefix [" + prefix + "]", e)); + } + } + } + @Override public Map<String, BlobMetadata> listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException { String prefix = blobNamePrefix == null ?
keyPath : buildKey(blobNamePrefix); @@ -339,10 +369,25 @@ public Map<String, BlobContainer> children() throws IOException { } private static List<ListObjectsV2Response> executeListing(AmazonS3Reference clientReference, ListObjectsV2Request listObjectsRequest) { + return executeListing(clientReference, listObjectsRequest, -1); + } + + private static List<ListObjectsV2Response> executeListing( + AmazonS3Reference clientReference, + ListObjectsV2Request listObjectsRequest, + int limit + ) { return SocketAccess.doPrivileged(() -> { final List<ListObjectsV2Response> results = new ArrayList<>(); + int totalObjects = 0; ListObjectsV2Iterable listObjectsIterable = clientReference.get().listObjectsV2Paginator(listObjectsRequest); - listObjectsIterable.forEach(results::add); + for (ListObjectsV2Response listObjectsV2Response : listObjectsIterable) { + results.add(listObjectsV2Response); + totalObjects += listObjectsV2Response.contents().size(); + if (limit != -1 && totalObjects > limit) { + break; + } + } return results; }); } @@ -356,6 +401,10 @@ private ListObjectsV2Request listObjectsRequest(String keyPath) { .build(); } + private ListObjectsV2Request listObjectsRequest(String keyPath, int limit) { + return listObjectsRequest(keyPath).toBuilder().maxKeys(Math.min(limit, 1000)).build(); + } + private String buildKey(String blobName) { return keyPath + blobName; } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index ec16f216f1777..a2a7ca8d8bdd5 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -33,6 +33,9 @@ package org.opensearch.repositories.s3; import org.mockito.ArgumentCaptor; +import org.opensearch.action.ActionListener; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStoreException; import org.opensearch.common.blobstore.DeleteResult; @@ -74,9 +77,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.NoSuchElementException; +import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -84,12 +91,12 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.times; public class S3BlobStoreContainerTests extends OpenSearchTestCase { @@ -187,26 +194,34 @@ private static class MockListObjectsV2ResponseIterator implements Iterator<ListObjectsV2Response> { - private final List<String> keysListed = new ArrayList<>(); + private final List<String> keysListed; private final boolean throwExceptionOnNextInvocation; public MockListObjectsV2ResponseIterator(int totalPageCount, int s3ObjectsPerPage, long s3ObjectSize) { - this.totalPageCount = totalPageCount; - this.s3ObjectsPerPage = s3ObjectsPerPage; - this.s3ObjectSize = s3ObjectSize;
- this.throwExceptionOnNextInvocation = false; + this(totalPageCount, s3ObjectsPerPage, s3ObjectSize, ""); + } + + public MockListObjectsV2ResponseIterator(int totalPageCount, int s3ObjectsPerPage, long s3ObjectSize, String blobPath) { + this(totalPageCount, s3ObjectsPerPage, s3ObjectSize, blobPath, false); } public MockListObjectsV2ResponseIterator( int totalPageCount, int s3ObjectsPerPage, long s3ObjectSize, + String blobPath, boolean throwExceptionOnNextInvocation ) { this.totalPageCount = totalPageCount; this.s3ObjectsPerPage = s3ObjectsPerPage; this.s3ObjectSize = s3ObjectSize; this.throwExceptionOnNextInvocation = throwExceptionOnNextInvocation; + keysListed = new ArrayList<>(); + for (int i = 0; i < totalPageCount * s3ObjectsPerPage; i++) { + keysListed.add(blobPath + UUID.randomUUID().toString()); + } + // S3 lists keys in lexicographic order + keysListed.sort(String::compareTo); } @Override @@ -220,11 +235,12 @@ public ListObjectsV2Response next() { throw SdkException.builder().build(); } if (currInvocationCount.getAndIncrement() < totalPageCount) { - String s3ObjectKey = UUID.randomUUID().toString(); - keysListed.add(s3ObjectKey); - return ListObjectsV2Response.builder() - .contents(Collections.nCopies(s3ObjectsPerPage, S3Object.builder().key(s3ObjectKey).size(s3ObjectSize).build())) - .build(); + List<S3Object> s3Objects = new ArrayList<>(); + for (int i = 0; i < s3ObjectsPerPage; i++) { + String s3ObjectKey = keysListed.get((currInvocationCount.get() - 1) * s3ObjectsPerPage + i); + s3Objects.add(S3Object.builder().key(s3ObjectKey).size(s3ObjectSize).build()); + } + return ListObjectsV2Response.builder().contents(s3Objects).build(); } throw new NoSuchElementException(); } @@ -232,6 +248,10 @@ public List<String> getKeysListed() { return keysListed; } + + public int numberOfPagesFetched() { + return currInvocationCount.get(); + } } public void testDelete() throws IOException { @@ -273,10 +293,8 @@ public void testDelete() throws IOException { // keysDeleted will have blobPath also assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1); assertTrue(keysDeleted.contains(blobPath.buildAsString())); - assertArrayEquals( - listObjectsV2ResponseIterator.getKeysListed().toArray(String[]::new), - keysDeleted.stream().filter(key -> !blobPath.buildAsString().equals(key)).toArray(String[]::new) - ); + keysDeleted.remove(blobPath.buildAsString()); + assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted)); } public void testDeleteItemLevelErrorsDuringDelete() { @@ -772,4 +790,112 @@ private static void assertNumberOfMultiparts(final int expectedParts, final long assertEquals("Expected number of parts [" + expectedParts + "] but got [" + result.v1() + "]", expectedParts, (long) result.v1()); assertEquals("Expected remaining [" + expectedRemaining + "] but got [" + result.v2() + "]", expectedRemaining, (long) result.v2()); } + + public void testListBlobsByPrefix() throws IOException { + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + + final S3Client client = mock(S3Client.class); + final AmazonS3Reference clientReference = new AmazonS3Reference(client); + when(blobStore.clientReference()).thenReturn(clientReference); + + BlobPath blobPath = mock(BlobPath.class); + when(blobPath.buildAsString()).thenReturn("/dummy/path"); + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+ + final ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); + when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + + MockListObjectsV2ResponseIterator iterator = new MockListObjectsV2ResponseIterator(2, 5, 100); + when(listObjectsV2Iterable.iterator()).thenReturn(iterator); + + Map<String, BlobMetadata> listOfBlobs = blobContainer.listBlobsByPrefix(null); + assertEquals(10, listOfBlobs.size()); + + Set<String> keys = iterator.keysListed.stream() + .map(s -> s.substring(blobPath.buildAsString().length())) + .collect(Collectors.toSet()); + assertEquals(keys, listOfBlobs.keySet()); + } + + private void testListBlobsByPrefixInLexicographicOrder( + int limit, + int expectedNumberofPagesFetched, + BlobContainer.BlobNameSortOrder blobNameSortOrder + ) throws IOException { + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + + final S3Client client = mock(S3Client.class); + final AmazonS3Reference clientReference = new AmazonS3Reference(client); + when(blobStore.clientReference()).thenReturn(clientReference); + + BlobPath blobPath = mock(BlobPath.class); + when(blobPath.buildAsString()).thenReturn("/dummy/path"); + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + final ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); + when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + + final MockListObjectsV2ResponseIterator iterator = new MockListObjectsV2ResponseIterator(2, 5, 100, blobPath.buildAsString()); + when(listObjectsV2Iterable.iterator()).thenReturn(iterator); + + if (limit >= 0) { + blobContainer.listBlobsByPrefixInSortedOrder(null, limit, blobNameSortOrder, new ActionListener<>() { + @Override + public void onResponse(List<BlobMetadata> blobMetadata) { + int actualLimit = Math.max(0, Math.min(limit, 10)); + assertEquals(actualLimit, blobMetadata.size()); + + List<String> keys = iterator.keysListed.stream() + .map(s -> s.substring(blobPath.buildAsString().length())) + .collect(Collectors.toList()); + Comparator<String> keysComparator = String::compareTo; + if (blobNameSortOrder != BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC) { + keysComparator = Collections.reverseOrder(String::compareTo); + } + keys.sort(keysComparator); + List<String> sortedKeys = keys.subList(0, actualLimit); + assertEquals(sortedKeys, blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList())); + assertEquals(expectedNumberofPagesFetched, iterator.numberOfPagesFetched()); + } + + @Override + public void onFailure(Exception e) { + fail("blobContainer.listBlobsByPrefixInLexicographicOrder failed with exception: " + e.getMessage()); + } + }); + } else { + assertThrows( + IllegalArgumentException.class, + () -> blobContainer.listBlobsByPrefixInSortedOrder(null, limit, blobNameSortOrder, new ActionListener<>() { + @Override + public void onResponse(List<BlobMetadata> blobMetadata) {} + + @Override + public void onFailure(Exception e) {} + }) + ); + } + } + + public void testListBlobsByPrefixInLexicographicOrderWithNegativeLimit() throws IOException { + testListBlobsByPrefixInLexicographicOrder(-5, 0, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithZeroLimit() throws IOException { + testListBlobsByPrefixInLexicographicOrder(0, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void
testListBlobsByPrefixInLexicographicOrderWithLimitLessThanPageSize() throws IOException { + testListBlobsByPrefixInLexicographicOrder(2, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanPageSize() throws IOException { + testListBlobsByPrefixInLexicographicOrder(8, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberOfRecords() throws IOException { + testListBlobsByPrefixInLexicographicOrder(12, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } } diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle new file mode 100644 index 0000000000000..2c275388cce38 --- /dev/null +++ b/plugins/telemetry-otel/build.gradle @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +import org.apache.tools.ant.taskdefs.condition.Os +import org.opensearch.gradle.Architecture +import org.opensearch.gradle.OS +import org.opensearch.gradle.info.BuildParams + +opensearchplugin { + description 'Opentelemetry based telemetry implementation.' + classname 'org.opensearch.telemetry.OTelTelemetryPlugin' + hasClientJar = true +} + +dependencies { + api project(":libs:opensearch-telemetry") + api "io.opentelemetry:opentelemetry-api:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-context:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-sdk:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-sdk-common:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-sdk-trace:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-sdk-metrics:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-exporter-logging:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-semconv:${versions.opentelemetry}-alpha" + api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}-alpha" + api "io.opentelemetry:opentelemetry-api-logs:${versions.opentelemetry}-alpha" +} + + +thirdPartyAudit { + ignoreViolations( + 'io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.opentelemetry.internal.shaded.jctools.util.UnsafeAccess', + 'io.opentelemetry.internal.shaded.jctools.util.UnsafeRefArrayAccess' + ) + + ignoreMissingClasses( + 'io.opentelemetry.api.events.EventEmitter', + 'io.opentelemetry.api.events.EventEmitterBuilder', + 'io.opentelemetry.api.events.EventEmitterProvider', + 'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleHistogramBuilder', + 'io.opentelemetry.extension.incubator.metrics.ExtendedLongHistogramBuilder', + 'io.opentelemetry.extension.incubator.metrics.HistogramAdviceConfigurer', + 'io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties', + 'io.opentelemetry.sdk.autoconfigure.spi.logs.ConfigurableLogRecordExporterProvider', + 'io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider', + 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider' + ) +} + +tasks.named("bundlePlugin").configure { + from('config/telemetry-otel') { + into 'config' + } +} + +tasks.register("writeTestJavaPolicy") { + doLast { 
+ final File tmp = file("${buildDir}/tmp") + if (tmp.exists() == false && tmp.mkdirs() == false) { + throw new GradleException("failed to create temporary directory [${tmp}]") + } + final File javaPolicy = file("${tmp}/java.policy") + javaPolicy.write( + [ + "grant {", + " permission java.io.FilePermission \"config\", \"read\";", + "};" + ].join("\n")) + } +} diff --git a/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties new file mode 100644 index 0000000000000..544f42bd5513b --- /dev/null +++ b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties @@ -0,0 +1,27 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# + + +appender.tracing.type = RollingFile +appender.tracing.name = tracing +appender.tracing.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_traces.log +appender.tracing.filePermissions = rw-r----- +appender.tracing.layout.type = PatternLayout +appender.tracing.layout.pattern = %m%n +appender.tracing.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_traces-%i.log.gz +appender.tracing.policies.type = Policies +appender.tracing.policies.size.type = SizeBasedTriggeringPolicy +appender.tracing.policies.size.size = 1GB +appender.tracing.strategy.type = DefaultRolloverStrategy +appender.tracing.strategy.max = 4 + + +logger.exporter.name = io.opentelemetry.exporter.logging.LoggingSpanExporter +logger.exporter.level = INFO +logger.exporter.appenderRef.tracing.ref = tracing +logger.exporter.additivity = false diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1 new file mode 100644 index 0000000000000..da3abcc8f70d2 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1 @@ -0,0 +1 @@ +7ee1ccca95155e4640094ba8dfbd0bb8c1709c83 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-api-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
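Aside for context (illustrative, not part of this patch): the telemetry-otel build script above pulls in the OpenTelemetry SDK together with its LoggingSpanExporter, and the plugin's log4j2.properties redirects that exporter's logger (io.opentelemetry.exporter.logging.LoggingSpanExporter) to the rolling *_otel_traces.log appender. The following is a minimal sketch of how those pieces fit together, assuming the opentelemetry-java 1.26.0 APIs referenced by the license and sha1 files in this patch; the class name, instrumentation scope, and span name are hypothetical.

```java
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.exporter.logging.LoggingSpanExporter;
import io.opentelemetry.sdk.OpenTelemetrySdk;
import io.opentelemetry.sdk.trace.SdkTracerProvider;
import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor;

public class OTelLoggingExporterSketch {
    public static void main(String[] args) {
        // LoggingSpanExporter emits finished spans through its own logger, which the
        // plugin's log4j2.properties routes to the rolling otel_traces log file.
        SdkTracerProvider tracerProvider = SdkTracerProvider.builder()
            .addSpanProcessor(SimpleSpanProcessor.create(LoggingSpanExporter.create()))
            .build();
        OpenTelemetry openTelemetry = OpenTelemetrySdk.builder().setTracerProvider(tracerProvider).build();

        Tracer tracer = openTelemetry.getTracer("sketch-scope"); // hypothetical scope name
        Span span = tracer.spanBuilder("sketch-span").startSpan();
        try {
            // traced work would happen here
        } finally {
            span.end(); // hands the span to the exporter
        }
        tracerProvider.shutdown(); // flush pending spans before exit
    }
}
```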
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..2c233d785dcb2 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1 @@ -0,0 +1 @@ +1b0b6c1a20da0f841634d4f736e331aa4871a4db \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-LICENSE.txt @@ -0,0 +1,202 @@
[202-line Apache License 2.0 text, identical to opentelemetry-api-LICENSE.txt above]
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1 new file mode 100644 index 0000000000000..01d9fd732249b --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1 @@ -0,0 +1 @@ +42991f523a7a10685ce63c32bd0db56574cffffa28de
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-context-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1 new file mode 100644 index 0000000000000..ef07e4cb81e34 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1 @@ -0,0 +1 @@ +1b932170774da5e766440fa058d879f68fe2c5dd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1 new file mode 100644 index 0000000000000..dc9946de3b160 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1 @@ -0,0 +1 @@ +79a86f258ede8625627e8fbdff07d1149c88a8e6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 new file mode 100644 index 0000000000000..2bd3e60a1faf6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 @@ -0,0 +1 @@ +b42359d2232f8d802d55153be5330b1d9e21ee15 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..90bb8202c4c9d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 @@ -0,0 +1 @@ +a8abeaee240291cce9067f07569f151d11a6275a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 new file mode 100644 index 0000000000000..62396a603423f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 @@ -0,0 +1 @@ +8c4af22d7d92a3a79714be3f79724b0ab774ba9e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 new file mode 100644 index 0000000000000..0fcebee353105 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 @@ -0,0 +1 @@ +fcc5785b2cf2be897f31b927e24b53e46e377388 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..47c7ece8c9f6c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 @@ -0,0 +1 @@ +1f4f963673f8209208f868666cd43e79b9a2dd15 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-semconv-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-semconv-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java new file mode 100644 index 0000000000000..1c38c9dc8d6be --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.telemetry;
+
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.plugins.TelemetryPlugin;
+import org.opensearch.telemetry.metrics.MetricsTelemetry;
+import org.opensearch.telemetry.tracing.OTelResourceProvider;
+import org.opensearch.telemetry.tracing.OTelTelemetry;
+import org.opensearch.telemetry.tracing.OTelTracingTelemetry;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * Telemetry plugin based on Otel
+ */
+public class OTelTelemetryPlugin extends Plugin implements TelemetryPlugin {
+
+    static final String OTEL_TRACER_NAME = "otel";
+
+    /**
+     * span exporter batch size
+     */
+    public static final Setting<Integer> TRACER_EXPORTER_BATCH_SIZE_SETTING = Setting.intSetting(
+        "telemetry.otel.tracer.exporter.batch_size",
+        512,
+        1,
+        Setting.Property.NodeScope,
+        Setting.Property.Dynamic
+    );
+    /**
+     * span exporter max queue size
+     */
+    public static final Setting<Integer> TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING = Setting.intSetting(
+        "telemetry.otel.tracer.exporter.max_queue_size",
+        2048,
+        1,
+        Setting.Property.NodeScope,
+        Setting.Property.Dynamic
+    );
+    /**
+     * span exporter delay in seconds
+     */
+    public static final Setting<TimeValue> TRACER_EXPORTER_DELAY_SETTING = Setting.timeSetting(
+        "telemetry.otel.tracer.exporter.delay",
+        TimeValue.timeValueSeconds(2),
+        Setting.Property.NodeScope,
+        Setting.Property.Dynamic
+    );
+
+    private final Settings settings;
+
+    /**
+     * Creates Otel plugin
+     * @param settings cluster settings
+     */
+    public OTelTelemetryPlugin(Settings settings) {
+        this.settings = settings;
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return Arrays.asList(TRACER_EXPORTER_BATCH_SIZE_SETTING, TRACER_EXPORTER_DELAY_SETTING, TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING);
+    }
+
+    @Override
+    public Optional<Telemetry> getTelemetry(TelemetrySettings settings) {
+        return Optional.of(telemetry());
+    }
+
+    @Override
+    public String getName() {
+        return OTEL_TRACER_NAME;
+    }
+
+    private Telemetry telemetry() {
+        return new OTelTelemetry(new OTelTracingTelemetry(OTelResourceProvider.get(settings)), new MetricsTelemetry() {
+        });
+    }
+
+}
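For orientation (not part of the patch): the three exporter settings registered above are ordinary OpenSearch Setting instances, so they resolve against node settings with Setting#get. A minimal sketch, assuming the plugin classes above are on the classpath; the class name is hypothetical:

    import org.opensearch.common.settings.Settings;
    import org.opensearch.common.unit.TimeValue;

    import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_BATCH_SIZE_SETTING;
    import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_DELAY_SETTING;
    import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING;

    public class ExporterSettingsSketch {
        public static void main(String[] args) {
            // Override one setting; the other two fall back to the defaults declared in the plugin.
            Settings settings = Settings.builder().put("telemetry.otel.tracer.exporter.batch_size", 256).build();
            int batchSize = TRACER_EXPORTER_BATCH_SIZE_SETTING.get(settings);        // 256 (overridden)
            int maxQueueSize = TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING.get(settings); // 2048 (default)
            TimeValue delay = TRACER_EXPORTER_DELAY_SETTING.get(settings);           // 2s (default)
            System.out.println(batchSize + " " + maxQueueSize + " " + delay);
        }
    }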
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/package-info.java
new file mode 100644
index 0000000000000..4545f0ef5990e
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * This package contains classes needed for telemetry.
+ */
+package org.opensearch.telemetry;
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelPropagatedSpan.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelPropagatedSpan.java
new file mode 100644
index 0000000000000..5aa1069e60367
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelPropagatedSpan.java
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+/**
+ * Propagated span through context propagation
+ */
+public class OTelPropagatedSpan extends OTelSpan {
+
+    /**
+     * Creates OTelPropagatedSpan
+     * @param span otel propagated span
+     */
+    public OTelPropagatedSpan(io.opentelemetry.api.trace.Span span) {
+        super(null, span, null);
+    }
+}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java
new file mode 100644
index 0000000000000..292165979c2f2
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java
@@ -0,0 +1,78 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator;
+import io.opentelemetry.context.propagation.ContextPropagators;
+import io.opentelemetry.exporter.logging.LoggingSpanExporter;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.resources.Resource;
+import io.opentelemetry.sdk.trace.SdkTracerProvider;
+import io.opentelemetry.sdk.trace.export.BatchSpanProcessor;
+import io.opentelemetry.sdk.trace.export.SpanExporter;
+import io.opentelemetry.sdk.trace.samplers.Sampler;
+import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
+import org.opensearch.common.settings.Settings;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_BATCH_SIZE_SETTING;
+import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_DELAY_SETTING;
+import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING;
+
+/**
+ * This class encapsulates all OpenTelemetry related resources
+ */
+public final class OTelResourceProvider {
+    private OTelResourceProvider() {}
+
+    /**
+     * Creates OpenTelemetry instance with default configuration
+     * @param settings cluster settings
+     * @return OpenTelemetry instance
+     */
+    public static OpenTelemetry get(Settings settings) {
+        return get(
+            settings,
+            LoggingSpanExporter.create(),
+            ContextPropagators.create(W3CTraceContextPropagator.getInstance()),
+            Sampler.alwaysOn()
+        );
+    }
+
+    /**
+     * Creates OpenTelemetry instance with provided configuration
+     * @param settings cluster settings
+     * @param spanExporter span exporter instance
+     * @param contextPropagators context propagator instance
+     * @param sampler sampler instance
+     * @return Opentelemetry instance
+     */
+    public static OpenTelemetry get(Settings settings, SpanExporter spanExporter, ContextPropagators contextPropagators, Sampler sampler) {
+        Resource resource = Resource.create(Attributes.of(ResourceAttributes.SERVICE_NAME, "OpenSearch"));
+        SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder()
+            .addSpanProcessor(spanProcessor(settings, spanExporter))
+            .setResource(resource)
+            .setSampler(sampler)
+            .build();
+
+        return OpenTelemetrySdk.builder().setTracerProvider(sdkTracerProvider).setPropagators(contextPropagators).buildAndRegisterGlobal();
+    }
+
+    private static BatchSpanProcessor spanProcessor(Settings settings, SpanExporter spanExporter) {
+        return BatchSpanProcessor.builder(spanExporter)
+            .setScheduleDelay(TRACER_EXPORTER_DELAY_SETTING.get(settings).getSeconds(), TimeUnit.SECONDS)
+            .setMaxExportBatchSize(TRACER_EXPORTER_BATCH_SIZE_SETTING.get(settings))
+            .setMaxQueueSize(TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING.get(settings))
+            .build();
+    }
+
+}
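A usage sketch for the provider above (illustrative, not from the patch): the four-argument overload takes exactly the values the convenience overload supplies internally. One caveat worth noting: buildAndRegisterGlobal() registers the SDK as the process-global OpenTelemetry, and the OpenTelemetry SDK allows that registration only once per JVM, so a second call fails. The class name below is hypothetical:

    import io.opentelemetry.api.OpenTelemetry;
    import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator;
    import io.opentelemetry.context.propagation.ContextPropagators;
    import io.opentelemetry.exporter.logging.LoggingSpanExporter;
    import io.opentelemetry.sdk.trace.samplers.Sampler;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.telemetry.tracing.OTelResourceProvider;

    public class ResourceProviderSketch {
        public static void main(String[] args) {
            // Equivalent to OTelResourceProvider.get(Settings.EMPTY), spelled out.
            OpenTelemetry openTelemetry = OTelResourceProvider.get(
                Settings.EMPTY,
                LoggingSpanExporter.create(),
                ContextPropagators.create(W3CTraceContextPropagator.getInstance()),
                Sampler.alwaysOn()
            );
            System.out.println(openTelemetry);
        }
    }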
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java
new file mode 100644
index 0000000000000..ba63df4ae47a1
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java
@@ -0,0 +1,76 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.StatusCode;
+
+/**
+ * Default implementation of {@link Span} using Otel span. It keeps a reference of OpenTelemetry Span and handles span
+ * lifecycle management by delegating calls to it.
+ */
+class OTelSpan extends AbstractSpan {
+
+    private final Span delegateSpan;
+
+    public OTelSpan(String spanName, Span span, org.opensearch.telemetry.tracing.Span parentSpan) {
+        super(spanName, parentSpan);
+        this.delegateSpan = span;
+    }
+
+    @Override
+    public void endSpan() {
+        delegateSpan.end();
+    }
+
+    @Override
+    public void addAttribute(String key, String value) {
+        delegateSpan.setAttribute(key, value);
+    }
+
+    @Override
+    public void addAttribute(String key, Long value) {
+        delegateSpan.setAttribute(key, value);
+    }
+
+    @Override
+    public void addAttribute(String key, Double value) {
+        delegateSpan.setAttribute(key, value);
+    }
+
+    @Override
+    public void addAttribute(String key, Boolean value) {
+        delegateSpan.setAttribute(key, value);
+    }
+
+    @Override
+    public void setError(Exception exception) {
+        delegateSpan.setStatus(StatusCode.ERROR, exception.getMessage());
+    }
+
+    @Override
+    public void addEvent(String event) {
+        delegateSpan.addEvent(event);
+    }
+
+    @Override
+    public String getTraceId() {
+        return delegateSpan.getSpanContext().getTraceId();
+    }
+
+    @Override
+    public String getSpanId() {
+        return delegateSpan.getSpanContext().getSpanId();
+    }
+
+    io.opentelemetry.api.trace.Span getDelegateSpan() {
+        return delegateSpan;
+    }
+
+}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java
new file mode 100644
index 0000000000000..282fabd43346b
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java
@@ -0,0 +1,41 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import org.opensearch.telemetry.Telemetry;
+import org.opensearch.telemetry.metrics.MetricsTelemetry;
+
+/**
+ * Otel implementation of Telemetry
+ */
+public class OTelTelemetry implements Telemetry {
+
+    private final TracingTelemetry tracingTelemetry;
+    private final MetricsTelemetry metricsTelemetry;
+
+    /**
+     * Creates Telemetry instance
+     * @param tracingTelemetry tracing telemetry
+     * @param metricsTelemetry metrics telemetry
+     */
+    public OTelTelemetry(TracingTelemetry tracingTelemetry, MetricsTelemetry metricsTelemetry) {
+        this.tracingTelemetry = tracingTelemetry;
+        this.metricsTelemetry = metricsTelemetry;
+    }
+
+    @Override
+    public TracingTelemetry getTracingTelemetry() {
+        return tracingTelemetry;
+    }
+
+    @Override
+    public MetricsTelemetry getMetricsTelemetry() {
+        return metricsTelemetry;
+    }
+}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java
new file mode 100644
index 0000000000000..15609b39b6b94
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java
@@ -0,0 +1,75 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.context.Context;
+import io.opentelemetry.context.propagation.TextMapGetter;
+import io.opentelemetry.context.propagation.TextMapSetter;
+
+import java.util.Map;
+import java.util.function.BiConsumer;
+
+/**
+ * Otel implementation of TracingContextPropagator
+ */
+public class OTelTracingContextPropagator implements TracingContextPropagator {
+
+    private final OpenTelemetry openTelemetry;
+
+    /**
+     * Creates OTelTracingContextPropagator instance
+     * @param openTelemetry Otel OpenTelemetry instance
+     */
+    public OTelTracingContextPropagator(OpenTelemetry openTelemetry) {
+        this.openTelemetry = openTelemetry;
+    }
+
+    @Override
+    public Span extract(Map<String, String> props) {
+        Context context = openTelemetry.getPropagators().getTextMapPropagator().extract(Context.current(), props, TEXT_MAP_GETTER);
+        if (context != null) {
+            io.opentelemetry.api.trace.Span span = io.opentelemetry.api.trace.Span.fromContext(context);
+            return new OTelPropagatedSpan(span);
+        }
+        return null;
+    }
+
+    @Override
+    public void inject(Span currentSpan, BiConsumer<String, String> setter) {
+        openTelemetry.getPropagators().getTextMapPropagator().inject(context((OTelSpan) currentSpan), setter, TEXT_MAP_SETTER);
+
+    }
+
+    private static Context context(OTelSpan oTelSpan) {
+        return Context.current().with(io.opentelemetry.api.trace.Span.wrap(oTelSpan.getDelegateSpan().getSpanContext()));
+    }
+
+    private static final TextMapSetter<BiConsumer<String, String>> TEXT_MAP_SETTER = (carrier, key, value) -> {
+        if (carrier != null) {
+            carrier.accept(key, value);
+        }
+    };
+
+    private static final TextMapGetter<Map<String, String>> TEXT_MAP_GETTER = new TextMapGetter<>() {
+        @Override
+        public Iterable<String> keys(Map<String, String> headers) {
+            return headers.keySet();
+        }
+
+        @Override
+        public String get(Map<String, String> headers, String key) {
+            if (headers != null && headers.containsKey(key)) {
+                return headers.get(key);
+            }
+            return null;
+        }
+    };
+
+}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java
new file mode 100644
index 0000000000000..8a0034e098461
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.context.Context;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * OTel based Telemetry provider
+ */
+public class OTelTracingTelemetry implements TracingTelemetry {
+
+    private static final Logger logger = LogManager.getLogger(OTelTracingTelemetry.class);
+
+    private final OpenTelemetry openTelemetry;
+    private final io.opentelemetry.api.trace.Tracer otelTracer;
+
+    /**
+     * Creates OTel based Telemetry
+     * @param openTelemetry OpenTelemetry instance
+     */
+    public OTelTracingTelemetry(OpenTelemetry openTelemetry) {
+        this.openTelemetry = openTelemetry;
+        this.otelTracer = openTelemetry.getTracer("os-tracer");
+
+    }
+
+    @Override
+    public void close() {
+        try {
+            ((Closeable) openTelemetry).close();
+        } catch (IOException e) {
+            logger.warn("Error while closing Opentelemetry", e);
+        }
+    }
+
+    @Override
+    public Span createSpan(String spanName, Span parentSpan) {
+        return createOtelSpan(spanName, parentSpan);
+    }
+
+    @Override
+    public TracingContextPropagator getContextPropagator() {
+        return new OTelTracingContextPropagator(openTelemetry);
+    }
+
+    private Span createOtelSpan(String spanName, Span parentSpan) {
+        io.opentelemetry.api.trace.Span otelSpan = otelSpan(spanName, parentSpan);
+        return new OTelSpan(spanName, otelSpan, parentSpan);
+    }
+
+    io.opentelemetry.api.trace.Span otelSpan(String spanName, Span parentOTelSpan) {
+        return parentOTelSpan == null || !(parentOTelSpan instanceof OTelSpan)
+            ? otelTracer.spanBuilder(spanName).startSpan()
+            : otelTracer.spanBuilder(spanName).setParent(Context.current().with(((OTelSpan) parentOTelSpan).getDelegateSpan())).startSpan();
+    }
+}
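To make the parenting logic of otelSpan above concrete: passing null (or a non-OTelSpan) starts a new root, while passing an OTelSpan parents the child via Context.current().with(...). An illustrative sketch (not part of the patch; the class name is hypothetical, and it assumes a node-style default configuration via OTelResourceProvider):

    import org.opensearch.common.settings.Settings;
    import org.opensearch.telemetry.tracing.OTelResourceProvider;
    import org.opensearch.telemetry.tracing.OTelTracingTelemetry;
    import org.opensearch.telemetry.tracing.Span;
    import org.opensearch.telemetry.tracing.TracingTelemetry;

    public class SpanHierarchySketch {
        public static void main(String[] args) throws Exception {
            TracingTelemetry telemetry = new OTelTracingTelemetry(OTelResourceProvider.get(Settings.EMPTY));
            Span root = telemetry.createSpan("request", null);      // no parent: starts a new trace
            Span child = telemetry.createSpan("fetch-shard", root); // parented under "request"
            child.endSpan();
            root.endSpan();
            telemetry.close();
        }
    }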
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/package-info.java
new file mode 100644
index 0000000000000..4ac1e4c212c81
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * This package contains classes needed for tracing requests.
+ */
+package org.opensearch.telemetry.tracing;
diff --git a/sandbox/plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
similarity index 67%
rename from sandbox/plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy
rename to plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
index 8161010cfa897..0f556121915bb 100644
--- a/sandbox/plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy
+++ b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
@@ -6,6 +6,7 @@
  * compatible open source license.
  */
-grant codeBase "${codebase.zstd-jni}" {
-  permission java.lang.RuntimePermission "loadLibrary.*";
+grant {
 };
+
+
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java
new file mode 100644
index 0000000000000..c6ffba04ac285
--- /dev/null
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java
@@ -0,0 +1,50 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry;
+
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.telemetry.tracing.OTelTracingTelemetry;
+import org.opensearch.telemetry.tracing.TracingTelemetry;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.Optional;
+
+import static org.opensearch.telemetry.OTelTelemetryPlugin.OTEL_TRACER_NAME;
+import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_BATCH_SIZE_SETTING;
+import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_DELAY_SETTING;
+import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING;
+
+public class OTelTelemetryPluginTests extends OpenSearchTestCase {
+
+    public void testGetTelemetry() {
+        Set<Setting<?>> allTracerSettings = new HashSet<>();
+        ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add));
+        Settings settings = Settings.builder().build();
+        OTelTelemetryPlugin oTelTracerModulePlugin = new OTelTelemetryPlugin(settings);
+        Optional<Telemetry> tracer = oTelTracerModulePlugin.getTelemetry(null);
+
+        assertEquals(OTEL_TRACER_NAME, oTelTracerModulePlugin.getName());
+        TracingTelemetry tracingTelemetry = tracer.get().getTracingTelemetry();
+        assertTrue(tracingTelemetry instanceof OTelTracingTelemetry);
+        assertEquals(
+            Arrays.asList(TRACER_EXPORTER_BATCH_SIZE_SETTING, TRACER_EXPORTER_DELAY_SETTING, TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING),
+            oTelTracerModulePlugin.getSettings()
+        );
+        tracingTelemetry.close();
+
+    }
+
+}
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanTests.java
new file mode 100644
index 0000000000000..ac849e620673a
--- /dev/null
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanTests.java
@@ -0,0 +1,92 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.SpanContext;
+import io.opentelemetry.api.trace.TraceFlags;
+import io.opentelemetry.api.trace.TraceState;
+import org.opensearch.test.OpenSearchTestCase;
+
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class OTelSpanTests extends OpenSearchTestCase {
+
+    private static final String TRACE_ID = "4aa59968f31dcbff7807741afa9d7d62";
+    private static final String SPAN_ID = "bea205cd25756b5e";
+
+    public void testEndSpanTest() {
+        Span mockSpan = getMockSpan();
+        OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null);
+        oTelSpan.endSpan();
+        verify(mockSpan).end();
+    }
+
+    public void testAddAttributeString() {
+        Span mockSpan = getMockSpan();
+        OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null);
+        oTelSpan.addAttribute("key", "value");
+
+        verify(mockSpan).setAttribute("key", "value");
+    }
+
+    public void testAddAttributeLong() {
+        Span mockSpan = getMockSpan();
+        OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null);
+        oTelSpan.addAttribute("key", 1L);
+
+        verify(mockSpan).setAttribute("key", 1L);
+    }
+
+    public void testAddAttributeDouble() {
+        Span mockSpan = getMockSpan();
+        OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null);
+        oTelSpan.addAttribute("key", 1.0);
+
+        verify(mockSpan).setAttribute("key", 1.0);
+    }
+
+    public void testAddAttributeBoolean() {
+        Span mockSpan = getMockSpan();
+        OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null);
+        oTelSpan.addAttribute("key", true);
+
+        verify(mockSpan).setAttribute("key", true);
+    }
+
+    public void testAddEvent() {
+        Span mockSpan = getMockSpan();
+        OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null);
+        oTelSpan.addEvent("eventName");
+
+        verify(mockSpan).addEvent("eventName");
+    }
+
+    public void testGetTraceId() {
+        Span mockSpan = getMockSpan();
+        OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null);
+
+        assertEquals(TRACE_ID, oTelSpan.getTraceId());
+    }
+
+    public void testGetSpanId() {
+        Span mockSpan = getMockSpan();
+        OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null);
+
+        assertEquals(SPAN_ID, oTelSpan.getSpanId());
+    }
+
+    private Span getMockSpan() {
+        Span mockSpan = mock(Span.class);
+        when(mockSpan.getSpanContext()).thenReturn(SpanContext.create(TRACE_ID, SPAN_ID, TraceFlags.getDefault(), TraceState.getDefault()));
+        return mockSpan;
+    }
+}
+ */ + +package org.opensearch.telemetry.tracing; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.SpanContext; +import io.opentelemetry.api.trace.TraceFlags; +import io.opentelemetry.api.trace.TraceState; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class OTelTracingContextPropagatorTests extends OpenSearchTestCase { + + private static final String TRACE_ID = "4aa59968f31dcbff7807741afa9d7d62"; + private static final String SPAN_ID = "bea205cd25756b5e"; + + public void testAddTracerContextToHeader() { + Span mockSpan = mock(Span.class); + when(mockSpan.getSpanContext()).thenReturn(SpanContext.create(TRACE_ID, SPAN_ID, TraceFlags.getDefault(), TraceState.getDefault())); + OTelSpan span = new OTelSpan("spanName", mockSpan, null); + Map<String, String> requestHeaders = new HashMap<>(); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); + TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry); + + tracingContextPropagator.inject(span, (key, value) -> requestHeaders.put(key, value)); + assertEquals("00-" + TRACE_ID + "-" + SPAN_ID + "-00", requestHeaders.get("traceparent")); + } + + public void testExtractTracerContextFromHeader() { + Map<String, String> requestHeaders = new HashMap<>(); + requestHeaders.put("traceparent", "00-" + TRACE_ID + "-" + SPAN_ID + "-00"); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); + TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry); + org.opensearch.telemetry.tracing.Span span = tracingContextPropagator.extract(requestHeaders); + assertEquals(TRACE_ID, span.getTraceId()); + assertEquals(SPAN_ID, span.getSpanId()); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java new file mode 100644 index 0000000000000..7dec7824b9790 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.telemetry.tracing; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.SpanBuilder; +import io.opentelemetry.api.trace.Tracer; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class OTelTracingTelemetryTests extends OpenSearchTestCase { + + public void testCreateSpanWithoutParent() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Tracer mockTracer = mock(Tracer.class); + when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); + when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); + + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + Span span = tracingTelemetry.createSpan("span_name", null); + + verify(mockSpanBuilder, never()).setParent(any()); + assertNull(span.getParentSpan()); + } + + public void testCreateSpanWithParent() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Tracer mockTracer = mock(Tracer.class); + when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); + when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); + + Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); + + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + Span span = tracingTelemetry.createSpan("span_name", parentSpan); + + verify(mockSpanBuilder).setParent(any()); + assertNotNull(span.getParentSpan()); + assertEquals("parent_span", span.getParentSpan().getSpanName()); + } + + public void testGetContextPropagator() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Tracer mockTracer = mock(Tracer.class); + when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + + assertTrue(tracingTelemetry.getContextPropagator() instanceof OTelTracingContextPropagator); + } + +} diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 42d0d40a1d449..8c0ee8ba718ac 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -62,7 +62,6 @@ thirdPartyAudit { 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', - 'com.aayushatharva.brotli4j.encoder.Encoder', 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.93.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.93.Final.jar.sha1 deleted file mode 100644 index 5c5a17a9466f1..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87fac21f4ef95157866b07b48e3c707a2f13c581 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..05b1c2a4d614e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +eec248b26f16e888688e5bb37b7eeda76b78d2f7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.93.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.93.Final.jar.sha1 deleted file mode 100644 index 2b12a111335a2..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -503badb458b6586632be8d1f81aa4e5ab99a80fc \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..baa7e25f1ac49 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +c70ef20ca338558147887df60f46341bc47f6900 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.93.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.93.Final.jar.sha1 deleted file mode 100644 index 6719e882e40fe..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36acf0c94d03eb6ecef78a749a32cbb7dc0c57b4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..8c018be2565e5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +9e5404764092c1f6305ad5719078f46ab228d587 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.93.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.93.Final.jar.sha1 deleted file mode 100644 index 2324a54dc5735..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1cfc49b91b0d3ddb30c9f7d8467e5d02ae8babdf \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..b787338551ede --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +ad4ecf779ebc794cd351f57792f56ea01387b868 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.93.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.93.Final.jar.sha1 deleted file mode 100644 index 54e5b22047812..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -10f7ed9d8e1bfcba416074c70e5388be96116bfc \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..b08e85ba7adf8 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +cd9121ce24d6d3f2898946d04b0ef3ec548b00b4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.93.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.93.Final.jar.sha1 deleted file mode 
100644 index c795762c2d7f1..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3860e99075f9e078364ed38f6d6fc8825b7a168a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..4c9e4dda2b852 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +e96f649e8e9dcb29a1f8e95328b99c9eb6cf76c2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.93.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.93.Final.jar.sha1 deleted file mode 100644 index 687cade3c7b3a..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.93.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a0894915c8027ce83b4d6a811c4e765955efd15 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.94.Final.jar.sha1 new file mode 100644 index 0000000000000..ed7760b8e15d1 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.94.Final.jar.sha1 @@ -0,0 +1 @@ +ec783a737f96991a87b1d5794e2f9eb2024d708a \ No newline at end of file diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 55f900c52f2c2..d64bf245dbf8f 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -55,6 +55,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { } String baseName = "v${bwcVersion}" + String bwcVersionStr = "${bwcVersion}" /* This project runs the core REST tests against a 4 node cluster where two of the nodes has a different minor. */ @@ -86,6 +87,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") } + systemProperty 'tests.upgrade_from_version', bwcVersionStr systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" onlyIf { project.bwc_tests_enabled } } diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java index a6675a6d0ddb5..f1f469544f634 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java @@ -66,6 +66,9 @@ public class IndexingIT extends OpenSearchRestTestCase { + protected static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); + + private int indexDocs(String index, final int idStart, final int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { final int id = idStart + i; @@ -110,16 +113,18 @@ private void printClusterRouting() throws IOException, ParseException { /** * This test verifies that segment replication does not break when primary shards are on lower OS version. It does this * by verifying replica shards contains same number of documents as primary's. 
- * - * @throws Exception */ public void testIndexingWithPrimaryOnBwcNodes() throws Exception { + if (UPGRADE_FROM_VERSION.before(Version.V_2_4_0)) { + logger.info("--> Skip test for version {} where segment replication feature is not available", UPGRADE_FROM_VERSION); + return; + } Nodes nodes = buildNodeAndVersions(); assumeFalse("new nodes is empty", nodes.getNewNodes().isEmpty()); logger.info("cluster discovered:\n {}", nodes.toString()); final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(Node::getNodeName).collect(Collectors.toList()); final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(",")); - // Exclude bwc nodes from allocation so that primaries gets allocated on current version + // Update allocation settings so that primaries get allocated only on nodes running on older version Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) @@ -133,7 +138,7 @@ public void testIndexingWithPrimaryOnBwcNodes() throws Exception { try (RestClient nodeClient = buildClient(restClientSettings(), nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { - logger.info("allowing replica shards assignment on bwc nodes"); + logger.info("Remove allocation include settings so that shards can be allocated on current version nodes"); updateIndexSettings(index, Settings.builder().putNull("index.routing.allocation.include._name")); // Add replicas so that it can be assigned on higher OS version nodes. updateIndexSettings(index, Settings.builder().put("index.number_of_replicas", 2)); @@ -154,13 +159,15 @@ /** - * This test creates a cluster with primary on older version but due to {@link org.opensearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider}; + * This test creates a cluster with primary on higher version but due to {@link org.opensearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider}; * replica shard allocation on lower OpenSearch version is prevented. Thus, this test covers the use case where * primary shard containing nodes are running on higher OS version while replicas are unassigned.
- * - * @throws Exception */ public void testIndexingWithReplicaOnBwcNodes() throws Exception { + if (UPGRADE_FROM_VERSION.before(Version.V_2_4_0)) { + logger.info("--> Skip test for version {} where segment replication feature is not available", UPGRADE_FROM_VERSION); + return; + } Nodes nodes = buildNodeAndVersions(); assumeFalse("new nodes is empty", nodes.getNewNodes().isEmpty()); logger.info("cluster discovered:\n {}", nodes.toString()); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index a758b8e4ccd72..93c0bc96a5183 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -38,22 +38,22 @@ import org.opensearch.client.Response; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Booleans; +import org.opensearch.common.io.Streams; import org.opensearch.common.settings.Settings; +import org.opensearch.index.codec.CodecService; +import org.opensearch.index.engine.EngineConfig; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.rest.yaml.ObjectPath; import java.io.IOException; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.either; /** * Basic test that indexed documents survive the rolling restart. See @@ -84,54 +84,51 @@ private void printClusterNodes() throws IOException, ParseException, URISyntaxEx } // Verifies that each shard copy holds the same document count across all containing nodes. - private void waitForSearchableDocs(String index, int shardCount) throws Exception { - Map<Integer, String> primaryShardToNodeIDMap = new HashMap<>(); - Map<Integer, String> replicaShardToNodeIDMap = new HashMap<>(); + private void waitForSearchableDocs(String index, int shardCount, int replicaCount) throws Exception { + assertTrue(shardCount > 0); + assertTrue(replicaCount > 0); waitForClusterHealthWithNoShardMigration(index, "green"); logger.info("--> _cat/shards before search \n{}", EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/shards?v")).getEntity())); - Request request = new Request("GET", index + "/_stats"); - request.addParameter("level", "shards"); - Response response = client().performRequest(request); - for (int shardNumber = 0; shardNumber < shardCount; shardNumber++) { - List<Object> shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + index + ".shards."
+ shardNumber); - for (Object shard : shardStats) { - final String nodeId = ObjectPath.evaluate(shard, "routing.node"); - final Boolean primary = ObjectPath.evaluate(shard, "routing.primary"); - if (primary) { - primaryShardToNodeIDMap.putIfAbsent(shardNumber, nodeId); - } else { - replicaShardToNodeIDMap.putIfAbsent(shardNumber, nodeId); + // Verify segment replication stats + verifySegmentStats(index); + + // Verify segment store + assertBusy(() -> { + /** + * Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by + * line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging + * to primary while remaining *replicaCount* records belong to replica copies + * */ + Request segrepStatsRequest = new Request("GET", "/_cat/segments/" + index + "?s=shard,segment,primaryOrReplica"); + segrepStatsRequest.addParameter("h", "index,shard,primaryOrReplica,segment,docs.count"); + Response segrepStatsResponse = client().performRequest(segrepStatsRequest); + logger.info("--> _cat/segments response\n {}", EntityUtils.toString(segrepStatsResponse.getEntity())); + List<String> responseList = Streams.readAllLines(segrepStatsResponse.getEntity().getContent()); + for (int segmentsIndex = 0; segmentsIndex < responseList.size();) { + String[] primaryRow = responseList.get(segmentsIndex++).split(" +"); + String shardId = primaryRow[0] + primaryRow[1]; + assertTrue(primaryRow[2].equals("p")); + for (int replicaIndex = 1; replicaIndex <= replicaCount; replicaIndex++) { + String[] replicaRow = responseList.get(segmentsIndex).split(" +"); + String replicaShardId = replicaRow[0] + replicaRow[1]; + // When segment has 0 doc count, not all replica copies possess that segment. Skip to next segment + if (replicaRow[2].equals("p")) { + assertTrue(primaryRow[4].equals("0")); + break; + } + // verify same shard id + assertTrue(replicaShardId.equals(shardId)); + // verify replica row + assertTrue(replicaRow[2].equals("r")); + // Verify segment name matches e.g. _0 + assertTrue(replicaRow[3].equals(primaryRow[3])); + // Verify doc count matches + assertTrue(replicaRow[4].equals(primaryRow[4])); + segmentsIndex++; } } - } - logger.info("--> primaryShardToNodeIDMap {}", primaryShardToNodeIDMap); - logger.info("--> replicaShardToNodeIDMap {}", replicaShardToNodeIDMap); - - for (int shardNumber = 0; shardNumber < shardCount; shardNumber++) { - logger.info("--> Verify doc count for shard number {}", shardNumber); - Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); - searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - searchTestIndexRequest.addParameter("filter_path", "hits.total"); - searchTestIndexRequest.addParameter("preference", "_shards:" + shardNumber + "|_only_nodes:" + primaryShardToNodeIDMap.get(shardNumber)); - Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest); - final int primaryHits = ObjectPath.createFromResponse(searchTestIndexResponse).evaluate("hits.total"); - logger.info("--> primaryHits {}", primaryHits); - final int shardNum = shardNumber; - // Verify replica shard doc count only when available.
- if (replicaShardToNodeIDMap.get(shardNum) != null) { - assertBusy(() -> { - Request replicaRequest = new Request("POST", "/" + index + "/_search"); - replicaRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - replicaRequest.addParameter("filter_path", "hits.total"); - replicaRequest.addParameter("preference", "_shards:" + shardNum + "|_only_nodes:" + replicaShardToNodeIDMap.get(shardNum)); - Response replicaResponse = client().performRequest(replicaRequest); - int replicaHits = ObjectPath.createFromResponse(replicaResponse).evaluate("hits.total"); - logger.info("--> ReplicaHits {}", replicaHits); - assertEquals(primaryHits, replicaHits); - }, 1, TimeUnit.MINUTES); - } - } + }, 1, TimeUnit.MINUTES); } private void waitForClusterHealthWithNoShardMigration(String indexName, String status) throws IOException { @@ -145,6 +142,18 @@ private void waitForClusterHealthWithNoShardMigration(String indexName, String s client().performRequest(waitForStatus); } + private void verifySegmentStats(String indexName) throws Exception { + assertBusy(() -> { + Request segrepStatsRequest = new Request("GET", "/_cat/segment_replication/" + indexName); + segrepStatsRequest.addParameter("h", "shardId,target_node,checkpoints_behind"); + Response segrepStatsResponse = client().performRequest(segrepStatsRequest); + for (String statLine : Streams.readAllLines(segrepStatsResponse.getEntity().getContent())) { + String[] elements = statLine.split(" +"); + assertEquals("Replica shard " + elements[0] + " not up to date with primary", 0, Integer.parseInt(elements[2])); + } + }, 1, TimeUnit.MINUTES); + } + public void testIndexing() throws IOException, ParseException { switch (CLUSTER_TYPE) { case OLD: @@ -239,9 +248,13 @@ public void testIndexing() throws IOException, ParseException { * @throws Exception */ public void testIndexingWithSegRep() throws Exception { + if (UPGRADE_FROM_VERSION.before(Version.V_2_4_0)) { + logger.info("--> Skip test for version {} where segment replication feature is not available", UPGRADE_FROM_VERSION); + return; + } final String indexName = "test-index-segrep"; final int shardCount = 3; - final int replicaCount = 1; + final int replicaCount = 2; logger.info("--> Case {}", CLUSTER_TYPE); printClusterNodes(); logger.info("--> _cat/shards before test execution \n{}", EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/shards?v")).getEntity())); @@ -251,6 +264,10 @@ .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shardCount) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), replicaCount) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put( + EngineConfig.INDEX_CODEC_SETTING.getKey(), + randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC, CodecService.LUCENE_DEFAULT_CODEC) + ) .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"); createIndex(indexName, settings.build()); waitForClusterHealthWithNoShardMigration(indexName, "green"); @@ -285,7 +302,7 @@ public void testIndexingWithSegRep() throws Exception { throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); } - waitForSearchableDocs(indexName, shardCount); + waitForSearchableDocs(indexName, shardCount, replicaCount); assertCount(indexName, expectedCount); if (CLUSTER_TYPE != ClusterType.OLD) { @@ -296,17 +313,16 @@ public void testIndexingWithSegRep() throws Exception { toBeDeleted.addParameter("refresh", "true"); toBeDeleted.setJsonEntity("{\"f1\":
\"delete-me\"}"); client().performRequest(toBeDeleted); - waitForSearchableDocs(indexName, shardCount); + waitForSearchableDocs(indexName, shardCount, replicaCount); assertCount(indexName, expectedCount + 6); logger.info("--> Delete previously added doc and verify doc count"); Request delete = new Request("DELETE", "/" + indexName + "/_doc/to_be_deleted"); delete.addParameter("refresh", "true"); client().performRequest(delete); - waitForSearchableDocs(indexName, shardCount); + waitForSearchableDocs(indexName, shardCount, replicaCount); assertCount(indexName, expectedCount + 5); } - logger.info("--> _cat/shards post execution \n{}", EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/shards?v")).getEntity())); } public void testAutoIdWithOpTypeCreate() throws IOException { diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index a2a13165ca10c..391d2c78b489b 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -40,25 +40,32 @@ apply plugin: 'opensearch.internal-distribution-download' testFixtures.useFixture() dependencies { - providedCompile 'javax.enterprise:cdi-api:2.0' - providedCompile "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" - providedCompile 'jakarta.ws.rs:jakarta.ws.rs-api:2.1.3' - api('org.jboss.resteasy:resteasy-jackson2-provider:3.0.26.Final') { - exclude module: 'jackson-annotations' - exclude module: 'jackson-core' - exclude module: 'jackson-databind' - exclude module: 'jackson-jaxrs-json-provider' + providedCompile('jakarta.enterprise:jakarta.enterprise.cdi-api:4.0.1') { + exclude module: 'jakarta.annotation-api' + } + providedCompile 'jakarta.ws.rs:jakarta.ws.rs-api:3.1.0' + providedCompile "org.jboss.resteasy:resteasy-core:${versions.resteasy}" + providedCompile "org.jboss.resteasy:resteasy-core-spi:${versions.resteasy}" + api("org.jboss.resteasy:resteasy-jackson2-provider:${versions.resteasy}") { + exclude module: 'jakarta.activation-api' + exclude group: 'com.fasterxml.jackson' + exclude group: 'com.fasterxml.jackson.core' + exclude group: 'com.fasterxml.jackson.dataformat' + exclude group: 'com.fasterxml.jackson.module' } api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" - api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" - api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:${versions.jackson}" - api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api "com.fasterxml.jackson.jakarta.rs:jackson-jakarta-rs-base:${versions.jackson}" + api "com.fasterxml.jackson.jakarta.rs:jackson-jakarta-rs-json-provider:${versions.jackson}" + api "com.github.fge:json-patch:1.9" api "org.apache.logging.log4j:log4j-api:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api project(path: ':client:rest-high-level') - testImplementation project(':test:framework') + api(project(path: ':client:rest-high-level')) { + exclude module: 'jakarta.annotation-api' + } + testImplementation(project(':test:framework')) { + exclude module: 'jakarta.annotation-api' + } } war { diff --git a/qa/wildfly/docker-compose.yml b/qa/wildfly/docker-compose.yml index 96f168ba3505c..b0f1609f01e72 100644 --- a/qa/wildfly/docker-compose.yml +++ b/qa/wildfly/docker-compose.yml @@ -2,7 +2,7 @@ version: '3.7' 
services: wildfly: - image: jboss/wildfly:18.0.1.Final + image: quay.io/wildfly/wildfly:28.0.1.Final-jdk11 environment: JAVA_OPTS: -Dopensearch.uri=opensearch:9200 -Djboss.http.port=8080 -Djava.net.preferIPv4Stack=true volumes: diff --git a/qa/wildfly/src/main/java/org/opensearch/wildfly/model/Employee.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/model/Employee.java index 5cf40ce941636..8f8d6635d568d 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/model/Employee.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/model/Employee.java @@ -34,12 +34,8 @@ import com.fasterxml.jackson.annotation.JsonProperty; -import javax.ws.rs.Consumes; -import javax.ws.rs.core.MediaType; - import java.util.List; -@Consumes(MediaType.APPLICATION_JSON) public class Employee { @JsonProperty(value = "first_name") diff --git a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientActivator.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientActivator.java index bf91346933e02..14b8cd6730531 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientActivator.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientActivator.java @@ -32,18 +32,10 @@ package org.opensearch.wildfly.transport; -import javax.ws.rs.ApplicationPath; -import javax.ws.rs.core.Application; - -import java.util.Collections; -import java.util.Set; +import jakarta.ws.rs.ApplicationPath; +import jakarta.ws.rs.core.Application; @ApplicationPath("/transport") public class RestHighLevelClientActivator extends Application { - @Override - public Set<Class<?>> getClasses() { - return Collections.singleton(RestHighLevelClientEmployeeResource.class); - } - } diff --git a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientEmployeeResource.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientEmployeeResource.java index 036d782f6f5e8..432b40367c978 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientEmployeeResource.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientEmployeeResource.java @@ -32,6 +32,15 @@ package org.opensearch.wildfly.transport; +import jakarta.inject.Inject; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; import org.opensearch.action.index.IndexRequest; @@ -41,14 +50,6 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.wildfly.model.Employee; -import javax.inject.Inject; -import javax.ws.rs.GET; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -88,6 +89,7 @@ public Response getEmployeeById(final @PathParam("id") Long id) throws IOExcepti @PUT @Path("/{id}") + @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) public Response putEmployeeById(final @PathParam("id") Long id, final Employee employee) throws URISyntaxException, IOException { Objects.requireNonNull(id); diff --git
a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java index 2b1abe45f7723..490ecd214c3f3 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java @@ -32,14 +32,13 @@ package org.opensearch.wildfly.transport; +import jakarta.enterprise.inject.Produces; import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.RestClient; import org.opensearch.client.RestHighLevelClient; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; -import javax.enterprise.inject.Produces; - import java.net.URISyntaxException; import java.nio.file.Path; diff --git a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java index 604d975f53280..7989f0351daef 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java @@ -34,7 +34,7 @@ import org.jboss.resteasy.plugins.providers.jackson.ResteasyJackson2Provider; -import javax.ws.rs.ext.Provider; +import jakarta.ws.rs.ext.Provider; @Provider public class RestHighLevelJacksonJsonProvider extends ResteasyJackson2Provider { diff --git a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml index 7191bfe1268aa..a08090100989a 100644 --- a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml +++ b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml @@ -1,9 +1,6 @@ - - - diff --git a/release-notes/opensearch.release-notes-1.3.11.md b/release-notes/opensearch.release-notes-1.3.11.md new file mode 100644 index 0000000000000..b2589d94b4f57 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.11.md @@ -0,0 +1,13 @@ +## 2023-06-27 Version 1.3.11 Release Notes + +### Upgrades +- Upgrade `netty` from 4.1.91.Final to 4.1.93.Final ([#7901](https://github.com/opensearch-project/OpenSearch/pull/7901)) +- Upgrade `spock-core` from 1.3-groovy-2.5 to 2.3-groovy-2.5 ([#8119](https://github.com/opensearch-project/OpenSearch/pull/8119)) +- Upgrade `com.google.guava:guava` from 31.0.1-jre to 32.0.1-jre ([#8107](https://github.com/opensearch-project/OpenSearch/pull/8107)) +- Upgrade versions of gradle-info-plugin and nebula-publishing-plugin ([#8150](https://github.com/opensearch-project/OpenSearch/pull/8150)) +- Upgrade `json-smart` from 2.4.7 to 2.4.11 in plugins/repository-hdfs/ ([#8160](https://github.com/opensearch-project/OpenSearch/pull/8160)) +- Upgrade `netty` from 4.1.93.Final to 4.1.94.Final ([#8191](https://github.com/opensearch-project/OpenSearch/pull/8191)) + +### Bug Fixes +- Fix mapping char_filter when mapping a hashtag ([#7591](https://github.com/opensearch-project/OpenSearch/pull/7591)) +- Force nebula plugins to use latest org.bouncycastle:* artifacts ([#8233](https://github.com/opensearch-project/OpenSearch/pull/8233)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json index 
87ab8117ec489..07148c7d261f4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json @@ -42,6 +42,10 @@ "type":"boolean", "description":"Should this request wait until the operation has completed before returning", "default":false + }, + "source_remote_store_repository": { + "type":"string", + "description":"Remote Store Repository of Remote Store Indices" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml index 0f897866fcb9d..7db5f31d8e761 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml @@ -712,3 +712,51 @@ setup: - match: { aggregations.m_terms.buckets.0.key: ["a", 1] } - match: { aggregations.m_terms.buckets.0.key_as_string: "a|1" } - match: { aggregations.m_terms.buckets.0.doc_count: 4 } + +--- +"aggregate over multi-terms test": + - skip: + version: "- 2.8.99" + reason: "multi_terms aggregation was introduced in 2.1.0, NPE bug checked by this test case will manifest in any version < 3.0" + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "ip": "127.0.0.1", "date": "2022-03-23"}' + - '{"index": {}}' + - '{"str": "a", "ip": "127.0.0.1", "date": "2022-03-25"}' + - '{"index": {}}' + - '{"str": "b", "ip": "127.0.0.1", "date": "2022-03-23"}' + - '{"index": {}}' + - '{"str": "b", "ip": "127.0.0.1", "date": "2022-03-25"}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + histo: + date_histogram: + field: date + calendar_interval: day + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: ip + + - match: { hits.total.value: 4 } + - length: { aggregations.histo.buckets: 3 } + - match: { aggregations.histo.buckets.0.key_as_string: "2022-03-23T00:00:00.000Z" } + - match: { aggregations.histo.buckets.0.m_terms.buckets.0.key: ["a", "127.0.0.1"] } + - match: { aggregations.histo.buckets.0.m_terms.buckets.1.key: ["b", "127.0.0.1"] } + - match: { aggregations.histo.buckets.1.key_as_string: "2022-03-24T00:00:00.000Z" } + - length: { aggregations.histo.buckets.1.m_terms.buckets: 0 } + - match: { aggregations.histo.buckets.2.key_as_string: "2022-03-25T00:00:00.000Z" } + - match: { aggregations.histo.buckets.2.m_terms.buckets.0.key: [ "a", "127.0.0.1" ] } + - match: { aggregations.histo.buckets.2.m_terms.buckets.1.key: [ "b", "127.0.0.1" ] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/60_empty.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/60_empty.yml new file mode 100644 index 0000000000000..54f61d46b6f6c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/60_empty.yml @@ -0,0 +1,32 @@ +--- +"Empty aggs Body": + - skip: + version: "- 2.8.99" + reason: "the fix was introduced in 2.9.0" + - do: + index: + index: test + id: 1 + body: { "double" : 42 } + + - do: + index: + index: test + id: 2 + body: { "double" : 100 } + + - do: + index: + index: test + id: 3 + body: { "double" : 50 } + + - do: + indices.refresh: {} + + - do: + search: + rest_total_hits_as_int: true + body: { "size" : 0, "aggs" : { } } + + - match: { hits.total: 3 } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 65c1527a68b96..55e1566656faf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -160,6 +160,28 @@ - match: {hits.hits.0._source.timestamp: "2019-10-21 00:30:04.828" } - match: {hits.hits.0.sort: [1571617804828] } + # search_after with the sort with missing + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"timestamp": null} + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "timestamp": { "order": "asc", "missing": "_last" } } ] + search_after: [ "2021-10-21 08:30:04.828" ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 3 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.timestamp: null } + --- "date_nanos": - skip: @@ -276,3 +298,25 @@ - match: {hits.hits.0._index: test } - match: {hits.hits.0._source.population: 15223372036854775800 } - match: {hits.hits.0.sort: [15223372036854775800] } + + # search_after with the sort with missing + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"population": null} + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "population": { "order": "asc", "missing": "_last" } } ] + search_after: [15223372036854775801] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 3 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: null } diff --git a/sandbox/plugins/custom-codecs/build.gradle b/sandbox/plugins/custom-codecs/build.gradle deleted file mode 100644 index 2183df25044a4..0000000000000 --- a/sandbox/plugins/custom-codecs/build.gradle +++ /dev/null @@ -1,28 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -apply plugin: 'opensearch.opensearchplugin' -apply plugin: 'opensearch.yaml-rest-test' - -opensearchplugin { - name 'custom-codecs' - description 'A plugin that implements custom compression codecs.' - classname 'org.opensearch.index.codec.customcodecs.CustomCodecPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') -} - -dependencies { - api "com.github.luben:zstd-jni:${versions.zstd}" -} - -yamlRestTest.enabled = false; -testingConventions.enabled = false; diff --git a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java deleted file mode 100644 index 9d36184bf81af..0000000000000 --- a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import org.opensearch.plugins.Plugin; -import org.opensearch.plugins.EnginePlugin; - -/** - * A plugin that implements custom codecs. Supports these codecs: - *
<ul> - * <li>zstd - * <li>zstdnodict - * </ul>
- * - * @opensearch.internal - */ -public final class CustomCodecPlugin extends Plugin implements EnginePlugin { - /** Creates a new instance. */ - public CustomCodecPlugin() {} -} diff --git a/server/build.gradle b/server/build.gradle index 2738e4351a109..ab67eabe76d0c 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -102,6 +102,8 @@ dependencies { api project(':libs:opensearch-secure-sm') api project(':libs:opensearch-x-content') api project(":libs:opensearch-geo") + api project(":libs:opensearch-telemetry") + compileOnly project(':libs:opensearch-plugin-classloader') testRuntimeOnly project(':libs:opensearch-plugin-classloader') diff --git a/server/licenses/lucene-analysis-common-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-analysis-common-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index a770070d97c2d..0000000000000 --- a/server/licenses/lucene-analysis-common-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5aa5989b931c68eee90b22bca3f1f280a7d5c1ee \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.7.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..45d8f459573b1 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.7.0.jar.sha1 @@ -0,0 +1 @@ +27ba6caaa4587a982cd451f7217b5a982bcfc44a \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-backward-codecs-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index c35bc03c19097..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2326fb4762cd14eff6bea770d5c85a848e7a2482 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.7.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..3981ea4fa226e --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.7.0.jar.sha1 @@ -0,0 +1 @@ +6389463bfbfcf902c8d31d12e9513a6818ac9d5e \ No newline at end of file diff --git a/server/licenses/lucene-core-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-core-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 2afe66c03cf22..0000000000000 --- a/server/licenses/lucene-core-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3aa698cf90f074cbf24acfd7feaaad84c5a6f829 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.7.0.jar.sha1 b/server/licenses/lucene-core-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..2b0f77275c0ab --- /dev/null +++ b/server/licenses/lucene-core-9.7.0.jar.sha1 @@ -0,0 +1 @@ +ad391210ffd806931334be9670a35af00c56f959 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-grouping-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index c7aaff3a0b184..0000000000000 --- a/server/licenses/lucene-grouping-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3251c58b21c5c205c63bbe618c0d3c1393908e1 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.7.0.jar.sha1 b/server/licenses/lucene-grouping-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..90acbf6dcee8d --- /dev/null +++ b/server/licenses/lucene-grouping-9.7.0.jar.sha1 @@ -0,0 +1 @@ +8e6f0c229f4861be641047c33b05067176e4279c \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.7.0-snapshot-204acc3.jar.sha1 
b/server/licenses/lucene-highlighter-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 6f9bfe7604f7f..0000000000000 --- a/server/licenses/lucene-highlighter-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2313a0755a5774eb5dfcdc116dc15bd9367c8cc1 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.7.0.jar.sha1 b/server/licenses/lucene-highlighter-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..bfcca0bc6cb5b --- /dev/null +++ b/server/licenses/lucene-highlighter-9.7.0.jar.sha1 @@ -0,0 +1 @@ +facb7c7ee0f75ed457a2d98f10d6430e25a53691 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-join-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 00b5c33be3568..0000000000000 --- a/server/licenses/lucene-join-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25fddcaf47d5614f48dc264f79151eb87990abd9 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.7.0.jar.sha1 b/server/licenses/lucene-join-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..0dab3a7ddc41a --- /dev/null +++ b/server/licenses/lucene-join-9.7.0.jar.sha1 @@ -0,0 +1 @@ +d041bdc0947a14223cf68357407ee18b21027587 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-memory-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index cf7eced4686aa..0000000000000 --- a/server/licenses/lucene-memory-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7c2aad4c1baa818a94bd787615e70824e6ae3a9d \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.7.0.jar.sha1 b/server/licenses/lucene-memory-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..357a9c4b2ea26 --- /dev/null +++ b/server/licenses/lucene-memory-9.7.0.jar.sha1 @@ -0,0 +1 @@ +0fade51ee353e15ddbbc45262aafe6f99ed020f1 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-misc-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index b5ac7eb1f988b..0000000000000 --- a/server/licenses/lucene-misc-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -56211acb2a31b1967568eba9953db502a53f9010 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.7.0.jar.sha1 b/server/licenses/lucene-misc-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..da5e1921626b2 --- /dev/null +++ b/server/licenses/lucene-misc-9.7.0.jar.sha1 @@ -0,0 +1 @@ +7fcf451e2376526c3a027958812866cc5b0ff13f \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-queries-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 2aa628ebc9573..0000000000000 --- a/server/licenses/lucene-queries-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c45b1847150e2e2e6302177b41faf95df2ca4fbf \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.7.0.jar.sha1 b/server/licenses/lucene-queries-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..fa82e95a7e19f --- /dev/null +++ b/server/licenses/lucene-queries-9.7.0.jar.sha1 @@ -0,0 +1 @@ +126989d4622419aa06fcbf3a342e859cab8c8799 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-queryparser-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 5e7b5d9da9c0d..0000000000000 --- 
a/server/licenses/lucene-queryparser-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d40fe6400f21564ccb50b6716ec42d8dcd526b9d \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.7.0.jar.sha1 b/server/licenses/lucene-queryparser-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..438db0aea66e1 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.7.0.jar.sha1 @@ -0,0 +1 @@ +6e77bde908ff698354e4a2149e6dd4658b56d7b0 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-sandbox-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index e45e9b929836b..0000000000000 --- a/server/licenses/lucene-sandbox-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e9347773d0b269768c58dd0b438bdc4b450f9185 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.7.0.jar.sha1 b/server/licenses/lucene-sandbox-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..38b0b1cccbc29 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.7.0.jar.sha1 @@ -0,0 +1 @@ +9f3e8e1947f2f1c5784132444af51a060ff0b4bf \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-spatial-extras-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index a697cb9fa2dff..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7055502c9a77b9a93d1b96c99397fd765dd7891f \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.7.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..48679df469fd1 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.7.0.jar.sha1 @@ -0,0 +1 @@ +01b0bc7a407d8c35a70a1adf7966bb3e7caae928 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-spatial3d-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 3b96e448add5b..0000000000000 --- a/server/licenses/lucene-spatial3d-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a2c5a7be5887dacf89c251936c3f3388ca20d28e \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.7.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..55d4d217fa6b9 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.7.0.jar.sha1 @@ -0,0 +1 @@ +7c6b1b6e0a70c9cd177371e648648c2f896742a2 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.7.0-snapshot-204acc3.jar.sha1 b/server/licenses/lucene-suggest-9.7.0-snapshot-204acc3.jar.sha1 deleted file mode 100644 index 0460861900897..0000000000000 --- a/server/licenses/lucene-suggest-9.7.0-snapshot-204acc3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a076a8db84fa85634cc4894ee4fc78b5e69525c \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.7.0.jar.sha1 b/server/licenses/lucene-suggest-9.7.0.jar.sha1 new file mode 100644 index 0000000000000..d4d7e6cd6bed9 --- /dev/null +++ b/server/licenses/lucene-suggest-9.7.0.jar.sha1 @@ -0,0 +1 @@ +5c37fd9a5d71dc87fe1cd4c18ff295ec8cfac170 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java new file mode 100644 index 0000000000000..2866292e5e2e0 --- /dev/null +++ 
b/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java @@ -0,0 +1,175 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec; + +import org.opensearch.action.admin.indices.flush.FlushResponse; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.opensearch.action.admin.indices.refresh.RefreshResponse; +import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.engine.Segment; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static java.util.stream.Collectors.toList; +import static org.hamcrest.Matchers.is; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class MultiCodecMergeIT extends OpenSearchIntegTestCase { + + public void testForceMergeMultipleCodecs() throws ExecutionException, InterruptedException { + + Map<String, String> codecMap = Map.of( + "best_compression", + "BEST_COMPRESSION", + "zstd_no_dict", + "ZSTD_NO_DICT", + "zstd", + "ZSTD", + "default", + "BEST_SPEED" + ); + + for (Map.Entry<String, String> codec : codecMap.entrySet()) { + forceMergeMultipleCodecs(codec.getKey(), codec.getValue(), codecMap); + } + + } + + private void forceMergeMultipleCodecs(String finalCodec, String finalCodecMode, Map<String, String> codecMap) throws ExecutionException, + InterruptedException { + + internalCluster().ensureAtLeastNumDataNodes(1); + final String index = "test-index" + finalCodec; + + // creating index + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", "default") + .put("index.merge.policy.max_merged_segment", "1b") + .build() + ); + ensureGreen(index); + // ingesting and asserting segment codec mode for all four codecs + for (Map.Entry<String, String> codec : codecMap.entrySet()) { + useCodec(index, codec.getKey()); + ingestDocs(index); + } + + assertTrue( + getSegments(index).stream() + .flatMap(s -> s.getAttributes().values().stream()) + .collect(Collectors.toSet()) + .containsAll(codecMap.values()) + ); + + // force merge into final codec + useCodec(index, finalCodec); + flushAndRefreshIndex(index); + final ForceMergeResponse forceMergeResponse = client().admin().indices().prepareForceMerge(index).setMaxNumSegments(1).get(); + 
assertThat(forceMergeResponse.getFailedShards(), is(0)); + assertThat(forceMergeResponse.getSuccessfulShards(), is(1)); + + flushAndRefreshIndex(index); + + List<Segment> segments = getSegments(index).stream().filter(Segment::isSearch).collect(Collectors.toList()); + assertEquals(1, segments.size()); + assertTrue(segments.stream().findFirst().get().attributes.containsValue(finalCodecMode)); + } + + private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { + assertAcked(client().admin().indices().prepareClose(index)); + + assertAcked( + client().admin() + .indices() + .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) + .get() + ); + + assertAcked(client().admin().indices().prepareOpen(index)); + } + + private void ingestDocs(String index) throws InterruptedException { + ingest(index); + flushAndRefreshIndex(index); + } + + private ArrayList<Segment> getSegments(String index) { + + return new ArrayList<>( + client().admin() + .indices() + .segments(new IndicesSegmentsRequest(index)) + .actionGet() + .getIndices() + .get(index) + .getShards() + .get(0) + .getShards()[0].getSegments() + ); + } + + private void ingest(String index) throws InterruptedException { + + final int nbDocs = randomIntBetween(1, 5); + indexRandom( + randomBoolean(), + false, + randomBoolean(), + IntStream.range(0, nbDocs) + .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) + .collect(toList()) + ); + } + + private void flushAndRefreshIndex(String index) { + + // Flush and refresh requests should not be blocked by any of these index blocks + for (String blockSetting : Arrays.asList( + SETTING_BLOCKS_READ, + SETTING_BLOCKS_WRITE, + SETTING_READ_ONLY, + SETTING_BLOCKS_METADATA, + SETTING_READ_ONLY_ALLOW_DELETE + )) { + try { + enableIndexBlock(index, blockSetting); + FlushResponse flushResponse = client().admin().indices().prepareFlush(index).setForce(true).execute().actionGet(); + assertNoFailures(flushResponse); + RefreshResponse response = client().admin().indices().prepareRefresh(index).execute().actionGet(); + assertNoFailures(response); + } finally { + disableIndexBlock(index, blockSetting); + } + } + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index 5056adea32f83..b6ea3a094f496 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -233,23 +233,30 @@ private void verifyPerIndexPrimaryBalance() throws Exception { RoutingNodes nodes = currentState.getRoutingNodes(); for (final Map.Entry<String, IndexRoutingTable> index : currentState.getRoutingTable().indicesRouting().entrySet()) { final int totalPrimaryShards = index.getValue().primaryShardsActive(); - final int avgPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / currentState.getRoutingNodes().size()); + final int lowerBoundPrimaryShardsPerNode = (int) Math.floor(totalPrimaryShards * 1f / currentState.getRoutingNodes().size()) + - 1; + final int upperBoundPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / currentState.getRoutingNodes().size()) + + 1; for (RoutingNode node : nodes) { final int primaryCount = node.shardsWithState(index.getKey(), STARTED) .stream() .filter(ShardRouting::primary) .collect(Collectors.toList())
.size(); - if (primaryCount > avgPrimaryShardsPerNode) { - logger.info( - "--> Primary shard balance assertion failure for index {} on node {} {} <= {}", - index.getKey(), - node.node().getName(), - primaryCount, - avgPrimaryShardsPerNode - ); - } - assertTrue(primaryCount <= avgPrimaryShardsPerNode); + // Asserts value is within the variance threshold (-1/+1 of the average value). + assertTrue( + "--> Primary balance assertion failure for index " + + index + + " on node " + + node.node().getName() + + " " + + lowerBoundPrimaryShardsPerNode + + " <= " + + primaryCount + + " (assigned) <= " + + upperBoundPrimaryShardsPerNode, + lowerBoundPrimaryShardsPerNode <= primaryCount && primaryCount <= upperBoundPrimaryShardsPerNode + ); } } }, 60, TimeUnit.SECONDS); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 0a593e6149ddd..ce5e0989b622f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; +import org.junit.Before; import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; @@ -95,11 +96,16 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationIT extends SegmentReplicationBaseIT { + @Before + private void setup() { + internalCluster().startClusterManagerOnlyNode(); + } + public void testPrimaryStopped_ReplicaPromoted() throws Exception { - final String primary = internalCluster().startNode(); + final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startNode(); + final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); @@ -125,7 +131,7 @@ public void testPrimaryStopped_ReplicaPromoted() throws Exception { assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); // start another node, index another doc and replicate.
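The relaxed window in verifyPerIndexPrimaryBalance above replaces a hard ceiling with a floor(avg) - 1 to ceil(avg) + 1 band. A small worked example with illustrative numbers, not taken from the test:

    public final class BalanceBounds {
        public static void main(String[] args) {
            int totalPrimaryShards = 5; // illustrative: 5 active primaries
            int nodeCount = 2;          // spread across 2 nodes
            // Same arithmetic as the new assertion.
            int lowerBound = (int) Math.floor(totalPrimaryShards * 1f / nodeCount) - 1; // 2 - 1 = 1
            int upperBound = (int) Math.ceil(totalPrimaryShards * 1f / nodeCount) + 1;  // 3 + 1 = 4
            // Any per-node primary count in [1, 4] now passes, where the old check demanded <= ceil(avg) = 3.
            System.out.println("acceptable per-node primary count: [" + lowerBound + ", " + upperBound + "]");
        }
    }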
- String nodeC = internalCluster().startNode(); + String nodeC = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); client().prepareIndex(INDEX_NAME).setId("4").setSource("baz", "baz").get(); refresh(INDEX_NAME); @@ -134,10 +140,10 @@ public void testPrimaryStopped_ReplicaPromoted() throws Exception { } public void testRestartPrimary() throws Exception { - final String primary = internalCluster().startNode(); + final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startNode(); + final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); assertEquals(getNodeContainingPrimaryShard().getName(), primary); @@ -160,10 +166,10 @@ public void testRestartPrimary() throws Exception { public void testCancelPrimaryAllocation() throws Exception { // this test cancels allocation on the primary - promoting the new replica and recreating the former primary as a replica. - final String primary = internalCluster().startNode(); + final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startNode(); + final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); final int initialDocCount = 1; @@ -190,8 +196,8 @@ public void testCancelPrimaryAllocation() throws Exception { } public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { - final String nodeA = internalCluster().startNode(); - final String nodeB = internalCluster().startNode(); + final String nodeA = internalCluster().startDataOnlyNode(); + final String nodeB = internalCluster().startDataOnlyNode(); final Settings settings = Settings.builder() .put(indexSettings()) .put( @@ -233,8 +239,8 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { } public void testIndexReopenClose() throws Exception { - final String primary = internalCluster().startNode(); - final String replica = internalCluster().startNode(); + final String primary = internalCluster().startDataOnlyNode(); + final String replica = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -274,8 +280,8 @@ public void testMultipleShards() throws Exception { .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); - final String nodeA = internalCluster().startNode(); - final String nodeB = internalCluster().startNode(); + final String nodeA = internalCluster().startDataOnlyNode(); + final String nodeB = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, indexSettings); ensureGreen(INDEX_NAME); @@ -310,8 +316,8 @@ public void testMultipleShards() throws Exception { } public void testReplicationAfterForceMerge() throws Exception { - final String nodeA = internalCluster().startNode(); - final String nodeB = internalCluster().startNode(); + final String nodeA = internalCluster().startDataOnlyNode(); + final String nodeB = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -351,14 +357,13 @@ public void testReplicationAfterForceMerge() throws Exception { * This test verifies that segment replication does not fail for closed indices */ public void testClosedIndices() { - internalCluster().startClusterManagerOnlyNode(); List nodes = new ArrayList<>(); // start 1st node so 
that it contains the primary - nodes.add(internalCluster().startNode()); + nodes.add(internalCluster().startDataOnlyNode()); createIndex(INDEX_NAME, super.indexSettings()); ensureYellowAndNoInitializingShards(INDEX_NAME); // start 2nd node so that it contains the replica - nodes.add(internalCluster().startNode()); + nodes.add(internalCluster().startDataOnlyNode()); ensureGreen(INDEX_NAME); logger.info("--> Close index"); @@ -373,8 +378,7 @@ public void testClosedIndices() { * @throws Exception when issue is encountered */ public void testNodeDropWithOngoingReplication() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - final String primaryNode = internalCluster().startNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); createIndex( INDEX_NAME, Settings.builder() @@ -385,7 +389,7 @@ public void testNodeDropWithOngoingReplication() throws Exception { .build() ); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startNode(); + final String replicaNode = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); // Get replica allocation id @@ -447,11 +451,11 @@ public void testNodeDropWithOngoingReplication() throws Exception { } public void testCancellation() throws Exception { - final String primaryNode = internalCluster().startNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startNode(); + final String replicaNode = internalCluster().startDataOnlyNode(); final SegmentReplicationSourceService segmentReplicationSourceService = internalCluster().getInstance( SegmentReplicationSourceService.class, @@ -506,7 +510,7 @@ public void testCancellation() throws Exception { } public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { - final String primaryNode = internalCluster().startNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); ensureGreen(INDEX_NAME); @@ -529,7 +533,7 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { .prepareUpdateSettings(INDEX_NAME) .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) ); - final String replicaNode = internalCluster().startNode(); + final String replicaNode = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); @@ -544,8 +548,8 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { } public void testDeleteOperations() throws Exception { - final String nodeA = internalCluster().startNode(); - final String nodeB = internalCluster().startNode(); + final String nodeA = internalCluster().startDataOnlyNode(); + final String nodeB = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -591,9 +595,9 @@ public void testDeleteOperations() throws Exception { */ public void testReplicationPostDeleteAndForceMerge() throws Exception { assumeFalse("Skipping the test with Remote store as its flaky.", segmentReplicationWithRemoteEnabled()); - final String primary = internalCluster().startNode(); + 
final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); - final String replica = internalCluster().startNode(); + final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); final int initialDocCount = scaledRandomIntBetween(10, 200); for (int i = 0; i < initialDocCount; i++) { @@ -648,7 +652,6 @@ public void testReplicationPostDeleteAndForceMerge() throws Exception { } public void testUpdateOperations() throws Exception { - internalCluster().startClusterManagerOnlyNode(); final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureYellow(INDEX_NAME); @@ -702,7 +705,6 @@ public void testDropPrimaryDuringReplication() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replica_count) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); - final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, settings); final List dataNodes = internalCluster().startDataOnlyNodes(6); @@ -742,11 +744,10 @@ public void testDropPrimaryDuringReplication() throws Exception { } public void testReplicaHasDiffFilesThanPrimary() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - final String primaryNode = internalCluster().startNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); ensureYellow(INDEX_NAME); - final String replicaNode = internalCluster().startNode(); + final String replicaNode = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME); @@ -796,9 +797,9 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { } public void testPressureServiceStats() throws Exception { - final String primaryNode = internalCluster().startNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); - final String replicaNode = internalCluster().startNode(); + final String replicaNode = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); int initialDocCount = scaledRandomIntBetween(100, 200); @@ -848,7 +849,7 @@ public void testPressureServiceStats() throws Exception { assertEquals(0, replicaNode_service.nodeStats().getShardStats().get(primaryShard.shardId()).getReplicaStats().size()); // start another replica. 
- String replicaNode_2 = internalCluster().startNode(); + String replicaNode_2 = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); String docId = String.valueOf(initialDocCount + 1); client().prepareIndex(INDEX_NAME).setId(docId).setSource("foo", "bar").get(); @@ -887,10 +888,10 @@ public void testPressureServiceStats() throws Exception { public void testScrollCreatedOnReplica() throws Exception { assumeFalse("Skipping the test with Remote store as its flaky.", segmentReplicationWithRemoteEnabled()); // create the cluster with one primary node containing primary shard and replica node containing replica shard - final String primary = internalCluster().startNode(); + final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startNode(); + final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); // index 100 docs @@ -981,7 +982,7 @@ public void testScrollWithOngoingSegmentReplication() throws Exception { ); // create the cluster with one primary node containing primary shard and replica node containing replica shard - final String primary = internalCluster().startNode(); + final String primary = internalCluster().startDataOnlyNode(); prepareCreate( INDEX_NAME, Settings.builder() @@ -989,7 +990,7 @@ public void testScrollWithOngoingSegmentReplication() throws Exception { .put("index.refresh_interval", -1) ).get(); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startNode(); + final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); final int initialDocCount = 10; @@ -1104,10 +1105,10 @@ public void testScrollWithOngoingSegmentReplication() throws Exception { } public void testPitCreatedOnReplica() throws Exception { - final String primary = internalCluster().startNode(); + final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startNode(); + final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); client().prepareIndex(INDEX_NAME) .setId("1") @@ -1234,13 +1235,13 @@ public void testPitCreatedOnReplica() throws Exception { */ public void testPrimaryReceivesDocsDuringReplicaRecovery() throws Exception { final List nodes = new ArrayList<>(); - final String primaryNode = internalCluster().startNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); nodes.add(primaryNode); final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); createIndex(INDEX_NAME, settings); ensureGreen(INDEX_NAME); // start a replica node, initially will be empty with no shard assignment. - final String replicaNode = internalCluster().startNode(); + final String replicaNode = internalCluster().startDataOnlyNode(); nodes.add(replicaNode); // index a doc. 
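The startNode() to startDataOnlyNode() conversions in this file all follow one pattern: a dedicated cluster-manager node comes up once per test via the @Before hook, and every node started afterwards can only hold data, so shard placement in the assertions stays deterministic. A minimal sketch of the shape, assuming the same OpenSearchIntegTestCase helpers used throughout this diff; the class and index names are illustrative:

    import org.junit.Before;
    import org.opensearch.test.OpenSearchIntegTestCase;

    @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
    public class NodeRolePatternIT extends OpenSearchIntegTestCase {

        @Before
        public void startDedicatedClusterManager() {
            // Keeps cluster-manager duties off the data path for every test in the class.
            internalCluster().startClusterManagerOnlyNode();
        }

        public void testShardsLandOnDataOnlyNodes() throws Exception {
            final String primary = internalCluster().startDataOnlyNode();
            createIndex("demo-index");
            ensureYellowAndNoInitializingShards("demo-index");
            final String replica = internalCluster().startDataOnlyNode();
            ensureGreen("demo-index");
            // With numDataNodes = 0 on the class, only these two nodes can host the shards.
            logger.info("--> primary={} replica={}", primary, replica);
        }
    }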
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index 2bcbf3f5b614d..f57c312aa2cd0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -70,7 +70,7 @@ protected void deleteRepo() { assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); } - protected void setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { + protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { logger.info("--> Creating repository={} at the path={}", REPOSITORY_NAME, repoLocation); // The random_control_io_exception_rate setting ensures that 10-25% of all operations to remote store results in /// IOException. skip_exception_on_verification_file & skip_exception_on_list_blobs settings ensures that the @@ -88,13 +88,14 @@ protected void setup(Path repoLocation, double ioFailureRate, String skipExcepti .put("max_failure_number", maxFailure) ); - internalCluster().startDataOnlyNodes(1); + String dataNodeName = internalCluster().startDataOnlyNodes(1).get(0); createIndex(INDEX_NAME); logger.info("--> Created index={}", INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); logger.info("--> Cluster is yellow with no initializing shards"); ensureGreen(INDEX_NAME); logger.info("--> Cluster is green"); + return dataNodeName; } /** diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRep.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRep.java index a4f6aceedc586..40b608388736a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRep.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRep.java @@ -12,6 +12,7 @@ import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; @@ -46,7 +47,15 @@ public void testRemoteStoreTranslogDisabledByUser() throws Exception { .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) .get(); Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings(indexSettings, "true", "my-segment-repo-1", "false", null, ReplicationType.SEGMENT.toString(), null); + verifyRemoteStoreIndexSettings( + indexSettings, + "true", + "my-segment-repo-1", + "false", + null, + ReplicationType.SEGMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); } @Override @@ -61,7 +70,7 @@ public void testDefaultRemoteStoreNoUserOverride() throws Exception { ); assertThat( exc.getMessage(), - containsString("Cannot enable [index.remote_store.enabled] when [cluster.indices.replication.strategy] is DOCUMENT") + containsString("Cannot enable [index.remote_store.enabled] when [index.replication.type] is DOCUMENT") ); } @@ -84,7 +93,7 @@ public void 
testDefaultRemoteStoreNoUserOverrideExceptReplicationTypeSegment() t "true", "my-translog-repo-1", ReplicationType.SEGMENT.toString(), - null + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java index a3235fd2a91d4..46966e289e75e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java @@ -12,17 +12,28 @@ import org.junit.Before; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.indices.SystemIndexDescriptor; +import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; + import static org.hamcrest.Matchers.containsString; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; @@ -57,6 +68,20 @@ protected Settings nodeSettings(int nodeOriginal) { return builder.build(); } + public static class TestPlugin extends Plugin implements SystemIndexPlugin { + @Override + public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) { + return Collections.singletonList( + new SystemIndexDescriptor(SYSTEM_INDEX_NAME, "System index for [" + getTestClass().getName() + ']') + ); + } + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(CreateRemoteIndexIT.TestPlugin.class); + } + @Override protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); @@ -101,10 +126,39 @@ public void testDefaultRemoteStoreNoUserOverride() throws Exception { "true", "my-translog-repo-1", ReplicationType.SEGMENT.toString(), - null + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } + private static final String SYSTEM_INDEX_NAME = ".test-system-index"; + + public void testSystemIndexWithRemoteStoreClusterSetting() throws Exception { + createIndex(SYSTEM_INDEX_NAME); + ensureGreen(SYSTEM_INDEX_NAME); + final GetSettingsResponse response = client().admin() + .indices() + .getSettings(new
GetSettingsRequest().indices(SYSTEM_INDEX_NAME).includeDefaults(true)) + .actionGet(); + // Verify that Document replication strategy is used + assertEquals(response.getSetting(SYSTEM_INDEX_NAME, SETTING_REPLICATION_TYPE), ReplicationType.DOCUMENT.toString()); + assertEquals(response.getSetting(SYSTEM_INDEX_NAME, SETTING_REMOTE_STORE_ENABLED), "false"); + } + + public void testSystemIndexWithRemoteStoreIndexSettings() throws Exception { + prepareCreate( + SYSTEM_INDEX_NAME, + Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).put(SETTING_REMOTE_STORE_ENABLED, true) + ).get(); + ensureGreen(SYSTEM_INDEX_NAME); + final GetSettingsResponse response = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(SYSTEM_INDEX_NAME).includeDefaults(true)) + .actionGet(); + // Verify that Document replication strategy is used + assertEquals(response.getSetting(SYSTEM_INDEX_NAME, SETTING_REPLICATION_TYPE), ReplicationType.DOCUMENT.toString()); + assertEquals(response.getSetting(SYSTEM_INDEX_NAME, SETTING_REMOTE_STORE_ENABLED), "false"); + } + public void testRemoteStoreDisabledByUser() throws Exception { Settings settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) @@ -124,7 +178,7 @@ public void testRemoteStoreDisabledByUser() throws Exception { null, null, client().settings().get(CLUSTER_SETTING_REPLICATION_TYPE), - null + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } @@ -175,7 +229,15 @@ public void testReplicationTypeDocumentByUser() throws Exception { .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) .get(); Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings(indexSettings, null, null, null, null, ReplicationType.DOCUMENT.toString(), null); + verifyRemoteStoreIndexSettings( + indexSettings, + null, + null, + null, + null, + ReplicationType.DOCUMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); } public void testRemoteStoreSegmentRepoWithoutRemoteEnabledAndSegmentReplicationIllegalArgumentException() throws Exception { @@ -216,7 +278,7 @@ public void testRemoteStoreEnabledByUserWithRemoteRepo() throws Exception { "true", "my-translog-repo-1", ReplicationType.SEGMENT.toString(), - null + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } @@ -232,7 +294,15 @@ public void testRemoteStoreTranslogDisabledByUser() throws Exception { .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) .get(); Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings(indexSettings, "true", "my-segment-repo-1", "false", null, ReplicationType.SEGMENT.toString(), null); + verifyRemoteStoreIndexSettings( + indexSettings, + "true", + "my-segment-repo-1", + "false", + null, + ReplicationType.SEGMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); } public void testRemoteStoreOverrideOnlyTranslogRepoIllegalArgumentException() throws Exception { @@ -307,7 +377,15 @@ public void testRemoteStoreOverrideTranslogDisabledCorrectly() throws Exception .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) .get(); Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings(indexSettings, "true", "my-custom-repo", "false", null, ReplicationType.SEGMENT.toString(), null); + verifyRemoteStoreIndexSettings( + indexSettings, + "true", + "my-custom-repo", + "false", + null, + 
ReplicationType.SEGMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); } public void testRemoteStoreOverrideTranslogDisabledWithTranslogRepoIllegalArgumentException() throws Exception { @@ -376,7 +454,7 @@ public void testRemoteStoreOverrideTranslogRepoCorrectly() throws Exception { "true", "my-custom-repo", ReplicationType.SEGMENT.toString(), - null + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } @@ -392,7 +470,15 @@ public void testRemoteStoreOverrideReplicationTypeIndexSettings() throws Excepti .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) .get(); Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings(indexSettings, null, null, null, null, ReplicationType.DOCUMENT.toString(), null); + verifyRemoteStoreIndexSettings( + indexSettings, + null, + null, + null, + null, + ReplicationType.DOCUMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); } protected void verifyRemoteStoreIndexSettings( @@ -402,14 +488,14 @@ protected void verifyRemoteStoreIndexSettings( String isRemoteTranslogEnabled, String remoteTranslogRepo, String replicationType, - String translogBufferInterval + TimeValue translogBufferInterval ) { assertEquals(replicationType, indexSettings.get(SETTING_REPLICATION_TYPE)); assertEquals(isRemoteSegmentEnabled, indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); assertEquals(remoteSegmentRepo, indexSettings.get(SETTING_REMOTE_STORE_REPOSITORY)); assertEquals(isRemoteTranslogEnabled, indexSettings.get(SETTING_REMOTE_TRANSLOG_STORE_ENABLED)); assertEquals(remoteTranslogRepo, indexSettings.get(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)); - assertEquals(translogBufferInterval, indexSettings.get(SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL)); + assertEquals(translogBufferInterval, INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(indexSettings)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexTranslogDisabledIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexTranslogDisabledIT.java index 977b5f9c1b90e..01c463392d7b3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexTranslogDisabledIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexTranslogDisabledIT.java @@ -12,6 +12,7 @@ import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; @@ -49,7 +50,15 @@ public void testRemoteStoreEnabledByUserWithRemoteRepo() throws Exception { .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) .get(); Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings(indexSettings, "true", "my-custom-repo", null, null, ReplicationType.SEGMENT.toString(), null); + verifyRemoteStoreIndexSettings( + indexSettings, + "true", + "my-custom-repo", + null, + null, + ReplicationType.SEGMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); } public void testDefaultRemoteStoreNoUserOverride() throws Exception { @@ -63,7 +72,15 @@ public void testDefaultRemoteStoreNoUserOverride() throws Exception { .getIndex(new 
GetIndexRequest().indices("test-idx-1").includeDefaults(true)) .get(); Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings(indexSettings, "true", "my-segment-repo-1", null, null, ReplicationType.SEGMENT.toString(), null); + verifyRemoteStoreIndexSettings( + indexSettings, + "true", + "my-segment-repo-1", + null, + null, + ReplicationType.SEGMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java new file mode 100644 index 0000000000000..6691da81f057d --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java @@ -0,0 +1,166 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.indices.refresh.RefreshResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.cluster.coordination.FollowersChecker; +import org.opensearch.cluster.coordination.LeaderChecker; +import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.health.ClusterIndexHealth; +import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.index.shard.ShardNotFoundException; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.disruption.NetworkDisruption; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class PrimaryTermValidationIT extends RemoteStoreBaseIntegTestCase { + + private static final String INDEX_NAME = "remote-store-test-idx-1"; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + + public void testPrimaryTermValidation() throws Exception { + // The follower checker interval is kept lower than the leader checker's so that the cluster manager can remove the node + // with the network partition faster. The follower check retry count is also kept at 1.
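The asymmetry in that comment is the core of the setup: follower checks are tuned to fail fast (1s interval, 1s timeout, one retry) while leader checks stay slow (20s interval, four retries), so the cluster manager evicts the partitioned primary long before the isolated node reacts. A back-of-envelope model of the two detection windows; the formula is a deliberate simplification for intuition, not the exact FollowersChecker or LeaderChecker algorithm:

    public final class CheckerWindows {
        // Simplified worst case: one full interval before a check fires, then each failed
        // attempt costs an interval plus its timeout before the retry budget is exhausted.
        static long worstCaseMs(long intervalMs, long timeoutMs, int retryCount) {
            return intervalMs + retryCount * (intervalMs + timeoutMs);
        }

        public static void main(String[] args) {
            System.out.println("follower side ~" + worstCaseMs(1_000, 1_000, 1) + " ms");  // ~3 seconds
            System.out.println("leader side ~" + worstCaseMs(20_000, 1_000, 4) + " ms");   // ~104 seconds
        }
    }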
+ Settings clusterSettings = Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(LeaderChecker.LEADER_CHECK_INTERVAL_SETTING.getKey(), "20s") + .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), 4) + .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "1s") + .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .build(); + internalCluster().startClusterManagerOnlyNode(clusterSettings); + + // Create repository + absolutePath = randomRepoPath().toAbsolutePath(); + assertAcked( + clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) + ); + + // Start data nodes and create index + internalCluster().startDataOnlyNodes(2, clusterSettings); + createIndex(INDEX_NAME, remoteTranslogIndexSettings(1)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + // Get the names of nodes to create network disruption + String primaryNode = primaryNodeName(INDEX_NAME); + String replicaNode = replicaNodeName(INDEX_NAME); + String clusterManagerNode = internalCluster().getClusterManagerName(); + logger.info("Node names : clusterManager={} primary={} replica={}", clusterManagerNode, primaryNode, replicaNode); + + // Index some docs and validate that both the primary and replica nodes have them. A refresh is triggered to start segment + // replication so that the replica is also up to date. + int numOfDocs = randomIntBetween(5, 10); + for (int i = 0; i < numOfDocs; i++) { + indexSameDoc(clusterManagerNode, INDEX_NAME); + } + refresh(INDEX_NAME); + assertBusy( + () -> assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), numOfDocs) + ); + assertBusy( + () -> assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), numOfDocs) + ); + + // Start network disruption - primary node will be isolated + Set<String> nodesInOneSide = Stream.of(clusterManagerNode, replicaNode).collect(Collectors.toCollection(HashSet::new)); + Set<String> nodesInOtherSide = Stream.of(primaryNode).collect(Collectors.toCollection(HashSet::new)); + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + logger.info("--> network disruption is started"); + networkDisruption.startDisrupting(); + + // Ensure the node which is partitioned is removed from the cluster + assertBusy(() -> { + NodesInfoResponse response = client(clusterManagerNode).admin().cluster().prepareNodesInfo().get(); + assertThat(response.getNodes().size(), equalTo(2)); + }); + + // Ensure that the cluster manager has the latest information about the index + assertBusy(() -> { + ClusterHealthResponse clusterHealthResponse = client(clusterManagerNode).admin() + .cluster() + .health(new ClusterHealthRequest()) + .actionGet(TimeValue.timeValueSeconds(1)); + assertTrue(clusterHealthResponse.getIndices().containsKey(INDEX_NAME)); + ClusterIndexHealth clusterIndexHealth = clusterHealthResponse.getIndices().get(INDEX_NAME); + assertEquals(ClusterHealthStatus.YELLOW, clusterHealthResponse.getStatus()); + assertEquals(1, clusterIndexHealth.getNumberOfShards()); + assertEquals(1, clusterIndexHealth.getActiveShards()); + assertEquals(1, clusterIndexHealth.getUnassignedShards()); + 
assertEquals(1, clusterIndexHealth.getActivePrimaryShards()); + assertEquals(ClusterHealthStatus.YELLOW, clusterIndexHealth.getStatus()); + }); + + // Index data to the newly promoted primary + indexSameDoc(clusterManagerNode, INDEX_NAME); + RefreshResponse refreshResponse = client(clusterManagerNode).admin() + .indices() + .prepareRefresh(INDEX_NAME) + .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED) + .execute() + .actionGet(); + assertNoFailures(refreshResponse); + assertEquals(1, refreshResponse.getSuccessfulShards()); + assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), numOfDocs + 1); + + // At this point we stop the disruption. Since the follower checker has already failed and the cluster manager has removed the node + // from the cluster, the failed node needs to restart the discovery process via the leader checker call. We stop the disruption to + // allow the failed node to communicate with the other node, which it assumes still hosts the replica. + networkDisruption.stopDisrupting(); + + // When the index call is made to the stale primary, it makes the primary term validation call to the other node (which + // it assumes hosts the replica). At this moment, the stale primary realises that it is no longer the primary, and the caller + // receives the exception below. + ShardNotFoundException exception = assertThrows(ShardNotFoundException.class, () -> indexSameDoc(primaryNode, INDEX_NAME)); + assertTrue(exception.getMessage().contains("no such shard")); + ensureStableCluster(3); + ensureGreen(INDEX_NAME); + } + + private IndexResponse indexSameDoc(String nodeName, String indexName) { + return client(nodeName).prepareIndex(indexName) + .setId(UUIDs.randomBase64UUID()) + .setSource("{\"foo\" : \"bar\"}", XContentType.JSON) + .get(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java index c46eab6468c6b..64d5f06f061a9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java @@ -11,40 +11,111 @@ import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.snapshots.mockstore.MockRepository; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteRefreshSegmentPressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT; import static
org.opensearch.index.remote.RemoteRefreshSegmentPressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreBackpressureIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { + public void testWritesRejectedDueToConsecutiveFailureBreach() throws Exception { + // Here the doc size of the request remains same throughout the test. After initial indexing, all remote store interactions + // fail leading to consecutive failure limit getting exceeded and leading to rejections. + validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 10, ByteSizeUnit.KB.toIntBytes(1), 15, "failure_streak_count"); + } + + public void testWritesRejectedDueToBytesLagBreach() throws Exception { + // Initially indexing happens with doc size of 2 bytes, then all remote store interactions start failing. Now, the + // indexing happens with doc size of 1KB leading to bytes lag limit getting exceeded and leading to rejections. + validateBackpressure(ByteSizeUnit.BYTES.toIntBytes(2), 30, ByteSizeUnit.KB.toIntBytes(1), 15, "bytes_lag"); + } - public void testWritesRejected() { + public void testWritesRejectedDueToTimeLagBreach() throws Exception { + // Initially indexing happens with doc size of 1KB, then all remote store interactions start failing. Now, the + // indexing happens with doc size of 1 byte leading to time lag limit getting exceeded and leading to rejections. + validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 15, "time_lag"); + } + + private void validateBackpressure( + int initialDocSize, + int initialDocsToIndex, + int onFailureDocSize, + int onFailureDocsToIndex, + String breachMode + ) throws Exception { Path location = randomRepoPath().toAbsolutePath(); - setup(location, 1d, "metadata", Long.MAX_VALUE); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); - Settings request = Settings.builder().put(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), true).build(); + Settings request = Settings.builder() + .put(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), true) + .put(MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 10) + .build(); ClusterUpdateSettingsResponse clusterUpdateResponse = client().admin() .cluster() .prepareUpdateSettings() .setPersistentSettings(request) .get(); assertEquals(clusterUpdateResponse.getPersistentSettings().get(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey()), "true"); + assertEquals(clusterUpdateResponse.getPersistentSettings().get(MIN_CONSECUTIVE_FAILURES_LIMIT.getKey()), "10"); logger.info("--> Indexing data"); + + String jsonString = generateString(initialDocSize); + BytesReference initialSource = new BytesArray(jsonString); + indexDocAndRefresh(initialSource, initialDocsToIndex); + + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName).repository(REPOSITORY_NAME)) + .setRandomControlIOExceptionRate(1d); + + jsonString = generateString(onFailureDocSize); + BytesReference onFailureSource = new BytesArray(jsonString); OpenSearchRejectedExecutionException ex = assertThrows( OpenSearchRejectedExecutionException.class, - () -> indexData(randomIntBetween(10, 20), randomBoolean()) + () -> indexDocAndRefresh(onFailureSource, onFailureDocsToIndex) ); assertTrue(ex.getMessage().contains("rejected execution on primary shard")); + assertTrue(ex.getMessage().contains(breachMode)); + + RemoteRefreshSegmentTracker.Stats stats = stats(); + assertTrue(stats.bytesLag > 0); + 
assertTrue(stats.refreshTimeLagMs > 0); + assertTrue(stats.localRefreshNumber - stats.remoteRefreshNumber > 0); + assertTrue(stats.rejectionCount > 0); + + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName).repository(REPOSITORY_NAME)) + .setRandomControlIOExceptionRate(0d); + + assertBusy(() -> { + RemoteRefreshSegmentTracker.Stats finalStats = stats(); + assertEquals(0, finalStats.bytesLag); + assertEquals(0, finalStats.refreshTimeLagMs); + assertEquals(0, finalStats.localRefreshNumber - finalStats.remoteRefreshNumber); + }, 30, TimeUnit.SECONDS); + + long rejectionCount = stats.rejectionCount; + stats = stats(); + indexDocAndRefresh(initialSource, initialDocsToIndex); + assertEquals(rejectionCount, stats.rejectionCount); + deleteRepo(); + } + + private RemoteRefreshSegmentTracker.Stats stats() { String shardId = "0"; RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, shardId).get(); final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, shardId); @@ -52,11 +123,37 @@ public void testWritesRejected() { .filter(stat -> indexShardId.equals(stat.getStats().shardId.toString())) .collect(Collectors.toList()); assertEquals(1, matches.size()); - RemoteRefreshSegmentTracker.Stats stats = matches.get(0).getStats(); - assertTrue(stats.bytesLag > 0); - assertTrue(stats.refreshTimeLagMs > 0); - assertTrue(stats.localRefreshNumber - stats.remoteRefreshNumber > 0); - assertTrue(stats.rejectionCount > 0); - deleteRepo(); + return matches.get(0).getStats(); + } + + private void indexDocAndRefresh(BytesReference source, int iterations) { + for (int i = 0; i < iterations; i++) { + client().prepareIndex(INDEX_NAME).setSource(source, XContentType.JSON).get(); + refresh(INDEX_NAME); + } + } + + /** + * Generates string of given sizeInBytes + * + * @param sizeInBytes size of the string + * @return the generated string + */ + private String generateString(int sizeInBytes) { + StringBuilder sb = new StringBuilder(); + sb.append("{"); + int i = 0; + // Based on local tests, 1 char is occupying 1 byte + while (sb.length() < sizeInBytes) { + String key = "field" + i; + String value = "value" + i; + sb.append("\"").append(key).append("\":\"").append(value).append("\","); + i++; + } + if (sb.length() > 1 && sb.charAt(sb.length() - 1) == ',') { + sb.setLength(sb.length() - 1); + } + sb.append("}"); + return sb.toString(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 0ffa5ab23e0b6..2b3fcadfc645e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -9,12 +9,12 @@ package org.opensearch.remotestore; import org.junit.After; -import org.junit.Before; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; @@ -62,24 +62,44 @@ private Settings defaultIndexSettings() { .build(); } - protected Settings remoteStoreIndexSettings(int 
numberOfReplicas) { + protected Settings remoteStoreIndexSettings(int numberOfReplicas, int numberOfShards) { return Settings.builder() .put(defaultIndexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) .build(); } - protected Settings remoteTranslogIndexSettings(int numberOfReplicas) { + protected Settings remoteStoreIndexSettings(int numberOfReplicas) { + return remoteStoreIndexSettings(numberOfReplicas, 1); + } + + protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFieldLimit) { return Settings.builder() .put(remoteStoreIndexSettings(numberOfReplicas)) + .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), totalFieldLimit) + .build(); + } + + protected Settings remoteTranslogIndexSettings(int numberOfReplicas, int numberOfShards) { + return Settings.builder() + .put(remoteStoreIndexSettings(numberOfReplicas, numberOfShards)) .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true) .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) .build(); } - @Before - public void setup() { + protected Settings remoteTranslogIndexSettings(int numberOfReplicas) { + return remoteTranslogIndexSettings(numberOfReplicas, 1); + } + + protected void putRepository(Path path) { + assertAcked( + clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", path)) + ); + } + + protected void setupRepo() { internalCluster().startClusterManagerOnlyNode(); absolutePath = randomRepoPath().toAbsolutePath(); assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 70a41d74a57c5..77de601b53ec6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -8,6 +8,7 @@ package org.opensearch.remotestore; +import org.junit.Before; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; @@ -17,6 +18,7 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; +import org.opensearch.index.shard.RemoteStoreRefreshListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalTestCluster; @@ -50,6 +52,11 @@ protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } + @Before + public void setup() { + setupRepo(); + } + @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); @@ -277,4 +284,42 @@ public void testRemoteSegmentCleanup() throws Exception { public void testRemoteTranslogCleanup() throws Exception { verifyRemoteStoreCleanup(true); } + + public void testStaleCommitDeletionWithInvokeFlush() throws Exception { + internalCluster().startDataOnlyNodes(3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l)); + int numberOfIterations = randomIntBetween(5, 15); + indexData(numberOfIterations, true); + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + 
.getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata"); + // Delete is async. + assertBusy(() -> { + int actualFileCount = getFileCount(indexPath); + if (numberOfIterations <= RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP) { + assertEquals(numberOfIterations, actualFileCount); + } else { + // As delete is async it's possible that the file gets created before the deletion or after + // deletion. + assertTrue(actualFileCount >= 10 && actualFileCount <= 11); + } + }, 30, TimeUnit.SECONDS); + } + + public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { + internalCluster().startDataOnlyNodes(3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l)); + int numberOfIterations = randomIntBetween(5, 15); + indexData(numberOfIterations, false); + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata"); + assertEquals(1, getFileCount(indexPath)); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 3c5853f9a64e9..0e4774c1f3454 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -8,28 +8,36 @@ package org.opensearch.remotestore; +import org.junit.Before; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequestBuilder; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.index.IndexResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.UUIDs; import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.stream.Collectors; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 3) public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; + @Before + public void setup() { + setupRepo(); + } + public void testStatsResponseFromAllNodes() { // Step 1 - We create a cluster, create an index, and then index documents into it. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. - internalCluster().startDataOnlyNodes(3); if (randomBoolean()) { createIndex(INDEX_NAME, remoteTranslogIndexSettings(0)); } else { @@ -38,18 +46,7 @@ public void testStatsResponseFromAllNodes() { ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); - // Indexing documents along with refreshes and flushes. - for (int i = 0; i < randomIntBetween(5, 10); i++) { - if (randomBoolean()) { - flush(INDEX_NAME); - } else { - refresh(INDEX_NAME); - } - int numberOfOperations = randomIntBetween(20, 50); - for (int j = 0; j < numberOfOperations; j++) { - indexSingleDoc(); - } - } + indexDocs(); // Step 2 - We find all the nodes that are present in the cluster.
We make the remote store stats API call from // each of the nodes in the cluster and check that the response is as expected. @@ -66,23 +63,93 @@ public void testStatsResponseFromAllNodes() { .collect(Collectors.toList()); assertEquals(1, matches.size()); RemoteRefreshSegmentTracker.Stats stats = matches.get(0).getStats(); - assertEquals(0, stats.refreshTimeLagMs); - assertEquals(stats.localRefreshNumber, stats.remoteRefreshNumber); - assertTrue(stats.uploadBytesStarted > 0); - assertEquals(0, stats.uploadBytesFailed); - assertTrue(stats.uploadBytesSucceeded > 0); - assertTrue(stats.totalUploadsStarted > 0); - assertEquals(0, stats.totalUploadsFailed); - assertTrue(stats.totalUploadsSucceeded > 0); - assertEquals(0, stats.rejectionCount); - assertEquals(0, stats.consecutiveFailuresCount); - assertEquals(0, stats.bytesLag); - assertTrue(stats.uploadBytesMovingAverage > 0); - assertTrue(stats.uploadBytesPerSecMovingAverage > 0); - assertTrue(stats.uploadTimeMovingAverage > 0); + assertResponseStats(stats); + } + } + + public void testStatsResponseAllShards() { + + // Step 1 - We create a cluster, create an index, and then index documents into it. We also do multiple refreshes/flushes + // during this time frame. This ensures that the segment upload has started. + createIndex(INDEX_NAME, remoteTranslogIndexSettings(0, 3)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + indexDocs(); + + // Step 2 - We pick one of the data nodes in the cluster. We make the remote store stats API call from + // that node and check that the response has stats for all the shards. + ClusterState state = getClusterState(); + String node = state.nodes().getDataNodes().values().stream().map(DiscoveryNode::getName).findFirst().get(); + RemoteStoreStatsRequestBuilder remoteStoreStatsRequestBuilder = client(node).admin() + .cluster() + .prepareRemoteStoreStats(INDEX_NAME, null); + RemoteStoreStatsResponse response = remoteStoreStatsRequestBuilder.get(); + assertTrue(response.getSuccessfulShards() == 3); + assertTrue(response.getShards() != null && response.getShards().length == 3); + RemoteRefreshSegmentTracker.Stats stats = response.getShards()[0].getStats(); + assertResponseStats(stats); + } + + public void testStatsResponseFromLocalNode() { + + // Step 1 - We create a cluster, create an index, and then index documents into it. We also do multiple refreshes/flushes + // during this time frame. This ensures that the segment upload has started. + createIndex(INDEX_NAME, remoteTranslogIndexSettings(0, 3)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + indexDocs(); + + // Step 2 - We find the data nodes in the cluster. We make the remote store stats API call from + // each of the data nodes in the cluster and check that only local shards are returned.
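For reference, all three tests above drive the same new client entry point. A minimal sketch of calling the remote store stats API outside the test harness; the client handle and index name are illustrative, and per the builder usage above the second argument selects a shard id, with null meaning all shards:

    // Query remote store stats for every shard of one index and inspect the upload trackers.
    RemoteStoreStatsResponse response = client.admin()
        .cluster()
        .prepareRemoteStoreStats("remote-store-test-idx-1", null) // null shard id: all shards
        .get();
    for (RemoteStoreStats shardStats : response.getShards()) {
        RemoteRefreshSegmentTracker.Stats stats = shardStats.getStats();
        // A healthy upload path reports no failed uploads and no refresh lag.
        assertEquals(0, stats.uploadBytesFailed);
        assertEquals(0, stats.refreshTimeLagMs);
    }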
+ ClusterState state = getClusterState(); + List<String> nodes = state.nodes().getDataNodes().values().stream().map(DiscoveryNode::getName).collect(Collectors.toList()); + for (String node : nodes) { + RemoteStoreStatsRequestBuilder remoteStoreStatsRequestBuilder = client(node).admin() + .cluster() + .prepareRemoteStoreStats(INDEX_NAME, null); + remoteStoreStatsRequestBuilder.setLocal(true); + RemoteStoreStatsResponse response = remoteStoreStatsRequestBuilder.get(); + assertTrue(response.getSuccessfulShards() == 1); + assertTrue(response.getShards() != null && response.getShards().length == 1); + RemoteRefreshSegmentTracker.Stats stats = response.getShards()[0].getStats(); + assertResponseStats(stats); } } + private void indexDocs() { + // Indexing documents along with refreshes and flushes. + for (int i = 0; i < randomIntBetween(5, 10); i++) { + if (randomBoolean()) { + flush(INDEX_NAME); + } else { + refresh(INDEX_NAME); + } + int numberOfOperations = randomIntBetween(20, 50); + for (int j = 0; j < numberOfOperations; j++) { + indexSingleDoc(); + } + } + } + + private void assertResponseStats(RemoteRefreshSegmentTracker.Stats stats) { + assertEquals(0, stats.refreshTimeLagMs); + assertEquals(stats.localRefreshNumber, stats.remoteRefreshNumber); + assertTrue(stats.uploadBytesStarted > 0); + assertEquals(0, stats.uploadBytesFailed); + assertTrue(stats.uploadBytesSucceeded > 0); + assertTrue(stats.totalUploadsStarted > 0); + assertEquals(0, stats.totalUploadsFailed); + assertTrue(stats.totalUploadsSucceeded > 0); + assertEquals(0, stats.rejectionCount); + assertEquals(0, stats.consecutiveFailuresCount); + assertEquals(0, stats.bytesLag); + assertTrue(stats.uploadBytesMovingAverage > 0); + assertTrue(stats.uploadBytesPerSecMovingAverage > 0); + assertTrue(stats.uploadTimeMovingAverage > 0); + } + private IndexResponse indexSingleDoc() { return client().prepareIndex(INDEX_NAME) .setId(UUIDs.randomBase64UUID()) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java index 712747f7479ae..0f3e041dd429a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java @@ -9,6 +9,7 @@ package org.opensearch.remotestore; import com.carrotsearch.randomizedtesting.RandomizedTest; +import org.junit.Before; import org.opensearch.action.admin.indices.close.CloseIndexResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -31,6 +32,11 @@ public class ReplicaToPrimaryPromotionIT extends RemoteStoreBaseIntegTestCase { private int shard_count = 5; + @Before + public void setup() { + setupRepo(); + } + @Override public Settings indexSettings() { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationRemoteStoreIT.java index ab0c0cc3aec77..7e79812fcfeea 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationRemoteStoreIT.java @@ -8,7 +8,6 @@ package org.opensearch.remotestore; -import org.apache.lucene.tests.util.LuceneTestCase; import org.junit.After; import 
org.junit.Before; import org.opensearch.cluster.metadata.IndexMetadata; @@ -26,7 +25,6 @@ * This makes sure that the constructs/flows that are being tested with Segment Replication, holds true after enabling * remote store. */ -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7643") @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationRemoteStoreIT extends SegmentReplicationIT { @@ -49,7 +47,7 @@ protected Settings featureFlagSettings() { } @Before - public void setup() { + private void setup() { internalCluster().startClusterManagerOnlyNode(); Path absolutePath = randomRepoPath().toAbsolutePath(); assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index ad6e358cb9da1..01fb91f83aa02 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -63,4 +63,10 @@ public void setup() { public void teardown() { assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7592") + @Override + public void testPressureServiceStats() throws Exception { + super.testPressureServiceStats(); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java index 1decd69ead7e3..a63c3287ea124 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -151,7 +151,7 @@ public void testSearchShardTaskCancellationWithHighElapsedTime() throws Interrup public void testSearchTaskCancellationWithHighCpu() throws InterruptedException { Settings request = Settings.builder() .put(SearchBackpressureSettings.SETTING_MODE.getKey(), "enforced") - .put(SearchTaskSettings.SETTING_CPU_TIME_MILLIS_THRESHOLD.getKey(), 50) + .put(SearchTaskSettings.SETTING_CPU_TIME_MILLIS_THRESHOLD.getKey(), 1000) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); @@ -182,7 +182,7 @@ public void testSearchTaskCancellationWithHighCpu() throws InterruptedException public void testSearchShardTaskCancellationWithHighCpu() throws InterruptedException { Settings request = Settings.builder() .put(SearchBackpressureSettings.SETTING_MODE.getKey(), "enforced") - .put(SearchShardTaskSettings.SETTING_CPU_TIME_MILLIS_THRESHOLD.getKey(), 50) + .put(SearchShardTaskSettings.SETTING_CPU_TIME_MILLIS_THRESHOLD.getKey(), 1000) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 8be14d1188db8..e362b7f61e8e6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -33,29 +33,44 @@ package org.opensearch.snapshots; import org.opensearch.action.ActionFuture; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; +import org.opensearch.client.Requests; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.index.IndexSettings; import org.opensearch.indices.InvalidIndexNameException; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestStatus; +import org.opensearch.test.InternalTestCluster; +import java.io.IOException; import java.nio.file.Path; import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -70,6 +85,8 @@ import static org.hamcrest.Matchers.nullValue; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY; import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.opensearch.index.query.QueryBuilders.matchQuery; @@ -81,6 +98,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; public class RestoreSnapshotIT extends AbstractSnapshotIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(FeatureFlags.REMOTE_STORE, "true").build(); + } public void testParallelRestoreOperations() { String indexName1 = "testindex1"; @@ -152,6 +173,504 @@ public void testParallelRestoreOperations() { assertThat(client.prepareGet(restoredIndexName2, docId2).get().isExists(), equalTo(true)); } + public void testRestoreRemoteStoreIndicesWithRemoteTranslog() throws 
IOException, ExecutionException, InterruptedException { + testRestoreOperationsShallowCopyEnabled(true); + } + + public void testRestoreRemoteStoreIndicesWithoutRemoteTranslog() throws IOException, ExecutionException, InterruptedException { + testRestoreOperationsShallowCopyEnabled(false); + } + + public void testRestoreOperationsShallowCopyEnabled(boolean remoteTranslogEnabled) throws IOException, ExecutionException, + InterruptedException { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + String snapshotName2 = "test-restore-snapshot2"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + logger.info("Remote Store Repo Path [{}]", absolutePath2); + String restoredIndexName1 = indexName1 + "-restored"; + String restoredIndexName1Seg = indexName1 + "-restored-seg"; + String restoredIndexName1Doc = indexName1 + "-restored-doc"; + String restoredIndexName2 = indexName2 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + createRepository(remoteStoreRepoName, "fs", absolutePath2); + + Client client = client(); + Settings indexSettings = getIndexSettings(true, remoteTranslogEnabled, remoteStoreRepoName, 1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(false, false, null, 1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + internalCluster().startDataOnlyNode(); + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); + CreateSnapshotResponse createSnapshotResponse2 = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse2.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); + assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + 
randomIntBetween(2, 5)); + ensureGreen(indexName1); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(false) + .setIndices(indexName2) + .setRenamePattern(indexName2) + .setRenameReplacement(restoredIndexName2) + .get(); + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1, restoredIndexName2); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); + + // deleting data for restoredIndexName1 and restoring from remote store. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + ensureRed(restoredIndexName1); + // Re-initialize client to make sure we are not using client from stopped node. + client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); + client.admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(restoredIndexName1), PlainActionFuture.newFuture()); + ensureYellowAndNoInitializingShards(restoredIndexName1); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + + // restore index as seg rep enabled with remote store and remote translog disabled + RestoreSnapshotResponse restoreSnapshotResponse3 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1Seg) + .get(); + assertEquals(restoreSnapshotResponse3.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1Seg); + + GetIndexResponse getIndexResponse = client.admin() + .indices() + .getIndex(new GetIndexRequest().indices(restoredIndexName1Seg).includeDefaults(true)) + .get(); + indexSettings = getIndexResponse.settings().get(restoredIndexName1Seg); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_REPOSITORY, null)); + assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); + assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1Seg, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1Seg); + assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1 + 2); + + // restore index as doc rep based from shallow copy snapshot + RestoreSnapshotResponse restoreSnapshotResponse4 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + 
.setIgnoreIndexSettings( + IndexMetadata.SETTING_REMOTE_STORE_ENABLED, + IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, + IndexMetadata.SETTING_REPLICATION_TYPE + ) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1Doc) + .get(); + assertEquals(restoreSnapshotResponse4.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1Doc); + + getIndexResponse = client.admin() + .indices() + .getIndex(new GetIndexRequest().indices(restoredIndexName1Doc).includeDefaults(true)) + .get(); + indexSettings = getIndexResponse.settings().get(restoredIndexName1Doc); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_REPOSITORY, null)); + assertNull(indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); + assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1Doc, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1Doc); + assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1 + 2); + } + + public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + String snapshotName2 = "test-restore-snapshot2"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + logger.info("Remote Store Repo Path [{}]", absolutePath2); + String restoredIndexName2 = indexName2 + "-restored"; + + boolean enableShallowCopy = randomBoolean(); + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, enableShallowCopy)); + createRepository(remoteStoreRepoName, "fs", absolutePath2); + + Client client = client(); + Settings indexSettings = getIndexSettings(true, randomBoolean(), remoteStoreRepoName, 1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(false, false, null, 1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + internalCluster().startDataOnlyNode(); + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); + CreateSnapshotResponse createSnapshotResponse2 = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName2) + 
.setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse2.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); + assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5)); + ensureGreen(indexName1); + + assertAcked(client().admin().indices().prepareClose(indexName1)); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .get(); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(false) + .setIndices(indexName2) + .setRenamePattern(indexName2) + .setRenameReplacement(restoredIndexName2) + .get(); + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); + ensureGreen(indexName1, restoredIndexName2); + assertDocsPresentInIndex(client, indexName1, numDocsInIndex1); + assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); + + // deleting data for indexName1 and restoring from remote store. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + ensureRed(indexName1); + // Re-initialize client to make sure we are not using client from stopped node. 
+ client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(indexName1)); + client.admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1), PlainActionFuture.newFuture()); + ensureYellowAndNoInitializingShards(indexName1); + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(indexName1); + assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 2); + } + + public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; + String remoteStoreRepo2Name = "test-rs-repo-2" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + Path absolutePath3 = randomRepoPath().toAbsolutePath(); + String restoredIndexName1 = indexName1 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); + createRepository(remoteStoreRepoName, "fs", absolutePath2); + createRepository(remoteStoreRepo2Name, "fs", absolutePath3); + + Client client = client(); + Settings indexSettings = getIndexSettings(true, true, remoteStoreRepoName, 1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(false, false, null, 1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + internalCluster().startDataOnlyNode(); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + Settings remoteStoreIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, remoteStoreRepo2Name) + .build(); + // restore index as a remote store index with different remote store repo + RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreIndexSettings) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); + + // deleting data for restoredIndexName1 and restoring from remote store. 
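The comment above introduces the same stop-close-restore sequence already used twice in this file, and the lines that follow are its concrete instance. As a general sketch of the remote-store restore flow, assuming the test-cluster helpers and placeholder names ("my-index", primary, clusterManagerNode):

    // Stop the node holding the primary; the shard goes red since there are no replicas.
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary));
    ensureRed("my-index");
    // Use a client bound to a live node, then close the index before restoring.
    Client client = client(clusterManagerNode);
    assertAcked(client.admin().indices().prepareClose("my-index"));
    // Seed recovery from the remote store and wait for the shards to come back.
    client.admin()
        .cluster()
        .restoreRemoteStore(new RestoreRemoteStoreRequest().indices("my-index"), PlainActionFuture.newFuture());
    ensureYellowAndNoInitializingShards("my-index");
    ensureGreen("my-index");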
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + // Re-initialize client to make sure we are not using client from stopped node. + client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); + client.admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(restoredIndexName1), PlainActionFuture.newFuture()); + ensureYellowAndNoInitializingShards(restoredIndexName1); + ensureGreen(restoredIndexName1); + // indexing some new docs and validating + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + + private Settings.Builder getIndexSettings( + boolean enableRemoteStore, + boolean enableRemoteTranslog, + String remoteStoreRepo, + int numOfShards, + int numOfReplicas + ) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas); + if (enableRemoteStore) { + settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, remoteStoreRepo) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + } + if (enableRemoteTranslog) { + settingsBuilder.put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStoreRepo); + } + return settingsBuilder; + } + + public void testRestoreShallowSnapshotRepositoryOverridden() throws ExecutionException, InterruptedException { + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; + String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + Path absolutePath3 = randomRepoPath().toAbsolutePath(); + String[] pathTokens = absolutePath1.toString().split("/"); + String basePath = pathTokens[pathTokens.length - 1]; + pathTokens = Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location = PathUtils.get(String.join("/", pathTokens)); + pathTokens = absolutePath2.toString().split("/"); + String basePath2 = pathTokens[pathTokens.length - 1]; + pathTokens = Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location2 = PathUtils.get(String.join("/", pathTokens)); + logger.info("Path 1 [{}]", absolutePath1); + logger.info("Path 2 [{}]", absolutePath2); + logger.info("Path 3 [{}]", absolutePath3); + String restoredIndexName1 = indexName1 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(location, basePath, true)); + createRepository(remoteStoreRepoName, "fs", absolutePath3); + + Client client = client(); + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, remoteStoreRepoName) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") + .put(IndexMetadata.SETTING_REPLICATION_TYPE, 
ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + createIndex(indexName1, indexSettings); + + int numDocsInIndex1 = randomIntBetween(2, 5); + indexDocuments(client, indexName1, numDocsInIndex1); + + ensureGreen(indexName1); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + createRepository(remoteStoreRepoName, "fs", absolutePath2); + + RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + + assertTrue(restoreSnapshotResponse.getRestoreInfo().failedShards() > 0); + + ensureRed(restoredIndexName1); + + client().admin().indices().close(Requests.closeIndexRequest(restoredIndexName1)).get(); + createRepository(remoteStoreRepoNameUpdated, "fs", absolutePath3); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .setSourceRemoteStoreRepository(remoteStoreRepoNameUpdated) + .get(); + + assertTrue(restoreSnapshotResponse2.getRestoreInfo().failedShards() == 0); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + + private void indexDocuments(Client client, String indexName, int numOfDocs) { + indexDocuments(client, indexName, 0, numOfDocs); + } + + private void indexDocuments(Client client, String indexName, int fromId, int toId) { + for (int i = fromId; i < toId; i++) { + String id = Integer.toString(i); + client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); + } + client.admin().indices().prepareFlush(indexName).get(); + } + + private void assertDocsPresentInIndex(Client client, String indexName, int numOfDocs) { + for (int i = 0; i < numOfDocs; i++) { + String id = Integer.toString(i); + logger.info("checking for index " + indexName + " with docId" + id); + assertTrue("doc with id" + id + " is not present for index " + indexName, client.prepareGet(indexName, id).get().isExists()); + } + } + public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { String indexName1 = "testindex1"; String indexName2 = "testindex2"; diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 168fbae84fdf4..902ae7cc54e3f 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ 
b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -294,8 +294,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.extensions.ExtensionsManager; import org.opensearch.extensions.action.ExtensionProxyAction; import org.opensearch.extensions.action.ExtensionProxyTransportAction; +import org.opensearch.extensions.rest.RestInitializeExtensionAction; import org.opensearch.index.seqno.RetentionLeaseActions; import org.opensearch.identity.IdentityService; import org.opensearch.indices.SystemIndices; @@ -453,7 +455,7 @@ import org.opensearch.rest.action.search.RestPutSearchPipelineAction; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.rest.action.search.RestSearchScrollAction; -import org.opensearch.rest.extensions.RestSendToExtensionAction; +import org.opensearch.extensions.rest.RestSendToExtensionAction; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.usage.UsageService; @@ -508,6 +510,7 @@ public class ActionModule extends AbstractModule { private final RequestValidators<PutMappingRequest> mappingRequestValidators; private final RequestValidators<IndicesAliasesRequest> indicesAliasesRequestRequestValidators; private final ThreadPool threadPool; + private final ExtensionsManager extensionsManager; public ActionModule( Settings settings, @@ -521,7 +524,8 @@ public ActionModule( CircuitBreakerService circuitBreakerService, UsageService usageService, SystemIndices systemIndices, - IdentityService identityService + IdentityService identityService, + ExtensionsManager extensionsManager ) { this.settings = settings; this.indexNameExpressionResolver = indexNameExpressionResolver; @@ -530,6 +534,7 @@ public ActionModule( this.settingsFilter = settingsFilter; this.actionPlugins = actionPlugins; this.threadPool = threadPool; + this.extensionsManager = extensionsManager; actions = setupActions(actionPlugins); actionFilters = setupActionFilters(actionPlugins); dynamicActionRegistry = new DynamicActionRegistry(); @@ -947,6 +952,11 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) { registerHandler.accept(new RestDeleteSearchPipelineAction()); } + // Extensions API + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + registerHandler.accept(new RestInitializeExtensionAction(extensionsManager)); + } + for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( settings, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index c183562e2e85a..6b8e06594acb7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -59,6 +59,7 @@ import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import org.opensearch.search.backpressure.stats.SearchBackpressureStats; +import org.opensearch.search.pipeline.SearchPipelineStats; import org.opensearch.tasks.TaskCancellationStats; import org.opensearch.threadpool.ThreadPoolStats; import org.opensearch.transport.TransportStats; @@ -138,6 +139,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private TaskCancellationStats taskCancellationStats; + @Nullable + private SearchPipelineStats searchPipelineStats; + public 
NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -189,6 +193,11 @@ public NodeStats(StreamInput in) throws IOException { } else { taskCancellationStats = null; } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { // TODO Update to 2_9_0 when we backport to 2.x + searchPipelineStats = in.readOptionalWriteable(SearchPipelineStats::new); + } else { + searchPipelineStats = null; + } } public NodeStats( @@ -214,7 +223,8 @@ public NodeStats( @Nullable ClusterManagerThrottlingStats clusterManagerThrottlingStats, @Nullable WeightedRoutingStats weightedRoutingStats, @Nullable FileCacheStats fileCacheStats, - @Nullable TaskCancellationStats taskCancellationStats + @Nullable TaskCancellationStats taskCancellationStats, + @Nullable SearchPipelineStats searchPipelineStats ) { super(node); this.timestamp = timestamp; @@ -239,6 +249,7 @@ public NodeStats( this.weightedRoutingStats = weightedRoutingStats; this.fileCacheStats = fileCacheStats; this.taskCancellationStats = taskCancellationStats; + this.searchPipelineStats = searchPipelineStats; } public long getTimestamp() { @@ -371,6 +382,11 @@ public TaskCancellationStats getTaskCancellationStats() { return taskCancellationStats; } + @Nullable + public SearchPipelineStats getSearchPipelineStats() { + return searchPipelineStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -411,6 +427,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(taskCancellationStats); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { // TODO: Update to 2_9_0 once we backport to 2.x + out.writeOptionalWriteable(searchPipelineStats); + } } @Override @@ -498,6 +517,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getTaskCancellationStats() != null) { getTaskCancellationStats().toXContent(builder, params); } + if (getSearchPipelineStats() != null) { + getSearchPipelineStats().toXContent(builder, params); + } return builder; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 68f391b91507c..f37a837c6f0ef 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -211,7 +211,8 @@ public enum Metric { CLUSTER_MANAGER_THROTTLING("cluster_manager_throttling"), WEIGHTED_ROUTING_STATS("weighted_routing"), FILE_CACHE_STATS("file_cache"), - TASK_CANCELLATION("task_cancellation"); + TASK_CANCELLATION("task_cancellation"), + SEARCH_PIPELINE("search_pipeline"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 6aadf546d30f7..660142f05bab2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -123,7 +123,8 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.CLUSTER_MANAGER_THROTTLING.containedIn(metrics), NodesStatsRequest.Metric.WEIGHTED_ROUTING_STATS.containedIn(metrics), 
NodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics), - NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics) + NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics), + NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index e127b44116b7e..9ab66d726854e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -113,6 +113,8 @@ private static StorageType fromString(String string) { private Settings indexSettings = EMPTY_SETTINGS; private String[] ignoreIndexSettings = Strings.EMPTY_ARRAY; private StorageType storageType = StorageType.LOCAL; + @Nullable + private String sourceRemoteStoreRepository = null; @Nullable // if any snapshot UUID will do private String snapshotUuid; @@ -148,6 +150,9 @@ public RestoreSnapshotRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_7_0)) { storageType = in.readEnum(StorageType.class); } + if (in.getVersion().onOrAfter(Version.V_2_9_0)) { + sourceRemoteStoreRepository = in.readOptionalString(); + } } @Override @@ -169,6 +174,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeEnum(storageType); } + if (out.getVersion().onOrAfter(Version.V_2_9_0)) { + out.writeOptionalString(sourceRemoteStoreRepository); + } } @Override @@ -521,6 +529,25 @@ public StorageType storageType() { return storageType; } + /** + * Sets Source Remote Store Repository for all the restored indices + * + * @param sourceRemoteStoreRepository name of the remote store repository that should be used for all restored indices. 
+ */ + public RestoreSnapshotRequest setSourceRemoteStoreRepository(String sourceRemoteStoreRepository) { + this.sourceRemoteStoreRepository = sourceRemoteStoreRepository; + return this; + } + + /** + * Returns Source Remote Store Repository for all the restored indices + * + * @return source Remote Store Repository + */ + public String getSourceRemoteStoreRepository() { + return sourceRemoteStoreRepository; + } + /** * Parses restore definition * @@ -586,6 +613,12 @@ public RestoreSnapshotRequest source(Map source) { throw new IllegalArgumentException("malformed storage_type"); } + } else if (name.equals("source_remote_store_repository")) { + if (entry.getValue() instanceof String) { + setSourceRemoteStoreRepository((String) entry.getValue()); + } else { + throw new IllegalArgumentException("malformed source_remote_store_repository"); + } } else { if (IndicesOptions.isIndicesOptions(name) == false) { throw new IllegalArgumentException("Unknown parameter " + name); @@ -631,6 +664,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (storageType != null) { storageType.toXContent(builder); } + if (sourceRemoteStoreRepository != null) { + builder.field("source_remote_store_repository", sourceRemoteStoreRepository); + } builder.endObject(); return builder; } @@ -658,7 +694,8 @@ public boolean equals(Object o) { && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) && Objects.equals(snapshotUuid, that.snapshotUuid) - && Objects.equals(storageType, that.storageType); + && Objects.equals(storageType, that.storageType) + && Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository); } @Override @@ -675,7 +712,8 @@ public int hashCode() { includeAliases, indexSettings, snapshotUuid, - storageType + storageType, + sourceRemoteStoreRepository ); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 0104637a00035..d9cca536d1c41 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -256,4 +256,12 @@ public RestoreSnapshotRequestBuilder setStorageType(RestoreSnapshotRequest.Stora request.storageType(storageType); return this; } + + /** + * Sets the source remote store repository name + */ + public RestoreSnapshotRequestBuilder setSourceRemoteStoreRepository(String repositoryName) { + request.setSourceRemoteStoreRepository(repositoryName); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java index 828f3a2e5e842..25a915833c7e2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -40,6 +40,7 @@ import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.metrics.OperationStats; import 
org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.TransportAddress; @@ -800,18 +801,18 @@ static class IngestStats implements ToXContentFragment { pipelineIds.add(processorStats.getKey()); for (org.opensearch.ingest.IngestStats.ProcessorStat stat : processorStats.getValue()) { stats.compute(stat.getType(), (k, v) -> { - org.opensearch.ingest.IngestStats.Stats nodeIngestStats = stat.getStats(); + OperationStats nodeIngestStats = stat.getStats(); if (v == null) { return new long[] { - nodeIngestStats.getIngestCount(), - nodeIngestStats.getIngestFailedCount(), - nodeIngestStats.getIngestCurrent(), - nodeIngestStats.getIngestTimeInMillis() }; + nodeIngestStats.getCount(), + nodeIngestStats.getFailedCount(), + nodeIngestStats.getCurrent(), + nodeIngestStats.getTotalTimeInMillis() }; } else { - v[0] += nodeIngestStats.getIngestCount(); - v[1] += nodeIngestStats.getIngestFailedCount(); - v[2] += nodeIngestStats.getIngestCurrent(); - v[3] += nodeIngestStats.getIngestTimeInMillis(); + v[0] += nodeIngestStats.getCount(); + v[1] += nodeIngestStats.getFailedCount(); + v[2] += nodeIngestStats.getCurrent(); + v[3] += nodeIngestStats.getTotalTimeInMillis(); return v; } }); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 726f8a0de19ae..aee6dfddd203e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -167,6 +167,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, false ); List shardsStats = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 969e0edbbc9d6..48fac9e8c8d38 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -57,6 +57,7 @@ import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.search.pipeline.PipelinedRequest; import org.opensearch.transport.Transport; import java.util.ArrayDeque; @@ -696,7 +697,11 @@ private void raisePhaseFailure(SearchPhaseExecutionException exception) { * @see #onShardResult(SearchPhaseResult, SearchShardIterator) */ final void onPhaseDone() { // as a tribute to @kimchy aka. 
finishHim() - executeNextPhase(this, getNextPhase(results, this)); + final SearchPhase nextPhase = getNextPhase(results, this); + if (request instanceof PipelinedRequest && nextPhase != null) { + ((PipelinedRequest) request).transformSearchPhaseResults(results, this, this.getName(), nextPhase.getName()); + } + executeNextPhase(this, nextPhase); } @Override diff --git a/server/src/main/java/org/opensearch/action/search/ArraySearchPhaseResults.java b/server/src/main/java/org/opensearch/action/search/ArraySearchPhaseResults.java index 61c81e6cda97a..653b0e8aedb9d 100644 --- a/server/src/main/java/org/opensearch/action/search/ArraySearchPhaseResults.java +++ b/server/src/main/java/org/opensearch/action/search/ArraySearchPhaseResults.java @@ -66,7 +66,7 @@ boolean hasResult(int shardIndex) { } @Override - AtomicArray getAtomicArray() { + public AtomicArray getAtomicArray() { return results; } } diff --git a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java index ec4d45a0a7124..c026c72f77f00 100644 --- a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java @@ -94,7 +94,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction, SearchPhase> nextPhaseFactory, SearchPhaseContext context ) { - super("dfs_query"); + super(SearchPhaseName.DFS_QUERY.getName()); this.progressListener = context.getTask().getProgressListener(); this.queryResult = queryResult; this.searchResults = searchResults; diff --git a/server/src/main/java/org/opensearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/opensearch/action/search/ExpandSearchPhase.java index cdefe7c2c1712..618a5620ce093 100644 --- a/server/src/main/java/org/opensearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/ExpandSearchPhase.java @@ -62,7 +62,7 @@ final class ExpandSearchPhase extends SearchPhase { private final AtomicArray queryResults; ExpandSearchPhase(SearchPhaseContext context, InternalSearchResponse searchResponse, AtomicArray queryResults) { - super("expand"); + super(SearchPhaseName.EXPAND.getName()); this.context = context; this.searchResponse = searchResponse; this.queryResults = queryResults; diff --git a/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java index 31ec896856ce6..85a3d140977bb 100644 --- a/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java @@ -92,7 +92,7 @@ final class FetchSearchPhase extends SearchPhase { SearchPhaseContext context, BiFunction, SearchPhase> nextPhaseFactory ) { - super("fetch"); + super(SearchPhaseName.FETCH.getName()); if (context.getNumShards() != resultConsumer.getNumShards()) { throw new IllegalStateException( "number of shards must match the length of the query results but doesn't:" diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhase.java b/server/src/main/java/org/opensearch/action/search/SearchPhase.java index 50f0940754078..50b0cd8e01c1d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhase.java @@ -34,6 +34,7 @@ import org.opensearch.common.CheckedRunnable; import 
java.io.IOException; +import java.util.Locale; import java.util.Objects; /** @@ -54,4 +55,13 @@ protected SearchPhase(String name) { public String getName() { return name; } + + /** + * Returns the SearchPhase name as a {@link SearchPhaseName}. An exception is thrown if the phase name is not defined + * in {@link SearchPhaseName}. + * @return {@link SearchPhaseName} + */ + public SearchPhaseName getSearchPhaseName() { + return SearchPhaseName.valueOf(name.toUpperCase(Locale.ROOT)); + } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java index 04b481249520b..018035f21179b 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java @@ -50,7 +50,7 @@ * * @opensearch.internal */ -interface SearchPhaseContext extends Executor { +public interface SearchPhaseContext extends Executor { // TODO maybe we can make this concrete later - for now we just implement this in the base class for all initial phases /** diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java new file mode 100644 index 0000000000000..b6f842cf2cce1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +/** + * Enum for different Search Phases in OpenSearch + * @opensearch.internal + */ +public enum SearchPhaseName { + QUERY("query"), + FETCH("fetch"), + DFS_QUERY("dfs_query"), + EXPAND("expand"), + CAN_MATCH("can_match"); + + private final String name; + + SearchPhaseName(final String name) { + this.name = name; + } + + public String getName() { + return name; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseResults.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseResults.java index 1baea0e721c44..2e6068b1ecddc 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseResults.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseResults.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -abstract class SearchPhaseResults<Result extends SearchPhaseResult> { +public abstract class SearchPhaseResults<Result extends SearchPhaseResult> { private final int numShards; SearchPhaseResults(int numShards) { @@ -75,7 +75,13 @@ final int getNumShards() { void consumeShardFailure(int shardIndex) {} - AtomicArray<Result> getAtomicArray() { + /** + * Returns an {@link AtomicArray} of {@link Result}, holding the SearchPhaseResults
* for the shards. Each {@link Result} is of type {@link SearchPhaseResult}. + * + * @return an {@link AtomicArray} of {@link Result} + */ + public AtomicArray<Result> getAtomicArray() { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java index 0b477624b15cc..899c7a3c1dabd 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java @@ -266,7 +266,7 @@ protected SearchPhase sendResponsePhase( SearchPhaseController.ReducedQueryPhase queryPhase, final AtomicArray<? extends SearchPhaseResult> fetchResults ) { - return new SearchPhase("fetch") { + return new SearchPhase(SearchPhaseName.FETCH.getName()) { @Override public void run() throws IOException { sendResponse(queryPhase, fetchResults); diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index 4119cb1cf28a0..9c0721ef63ea6 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -92,7 +92,7 @@ protected void executeInitialPhase( @Override protected SearchPhase moveToNextPhase(BiFunction<String, String, DiscoveryNode> clusterNodeLookup) { - return new SearchPhase("fetch") { + return new SearchPhase(SearchPhaseName.FETCH.getName()) { @Override public void run() { final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedScrollQueryPhase( diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 69f529fe1d00c..df2170cbe2af1 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -390,13 +390,12 @@ private void executeRequest( relativeStartNanos, System::nanoTime ); - SearchRequest searchRequest; + PipelinedRequest searchRequest; ActionListener<SearchResponse> listener; try { - PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(originalSearchRequest); - searchRequest = pipelinedRequest.transformedRequest(); + searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); listener = ActionListener.wrap( - r -> originalListener.onResponse(pipelinedRequest.transformResponse(r)), + r -> originalListener.onResponse(searchRequest.transformResponse(r)), originalListener::onFailure ); } catch (Exception e) { diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 37c7fd5c0a96c..749c146de4f16 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -316,7 +316,6 @@ static void addFilePermissions(Permissions policy, Environment environment) throws IOException addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libDir(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesDir(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsDir(), "read,readlink", false); - addDirectoryPath(policy, 
Environment.PATH_HOME_SETTING.getKey(), environment.extensionDir(), "read,readlink", false); addDirectoryPath(policy, "path.conf", environment.configDir(), "read,readlink", false); // read-write dirs addDirectoryPath(policy, "java.io.tmpdir", environment.tmpDir(), "read,readlink,write,delete", false); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 08cfea1abf270..114051311ab97 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -447,7 +447,7 @@ public String executor() { } public void sendValidateJoinRequest(DiscoveryNode node, ClusterState state, ActionListener listener) { - if (node.getVersion().before(Version.V_3_0_0)) { + if (node.getVersion().before(Version.V_2_9_0)) { transportService.sendRequest( node, VALIDATE_JOIN_ACTION_NAME, diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index d701d53ba29a1..ddc603541e038 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -54,7 +54,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.Strings; @@ -92,7 +91,6 @@ import static org.opensearch.cluster.node.DiscoveryNodeFilters.OpType.OR; import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; -import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; /** * Index metadata information @@ -299,8 +297,6 @@ public Iterator> settings() { public static final String SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY = "index.remote_store.translog.repository"; - public static final String SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL = "index.remote_store.translog.buffer_interval"; - /** * Used to specify if the index data should be persisted in the remote store. 
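The JoinHelper hunk above tightens the version gate on the dedicated validate-join channel, and the SnapshotRecoverySource hunks later in this patch apply the matching serialization discipline. A hedged sketch of that version-gated wire pattern, using only the stream methods this diff itself uses (MyRequest and its fields are hypothetical):

```java
import java.io.IOException;

import org.opensearch.Version;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;

// New fields are written only to nodes that understand them; readers fall back
// to defaults when the sender predates the fields. Mirrors SnapshotRecoverySource.
class MyRequest {
    private final boolean shallowCopy;     // hypothetical field added in 2.9
    private final String sourceRepository; // hypothetical nullable field added in 2.9

    MyRequest(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_2_9_0)) {
            shallowCopy = in.readBoolean();
            sourceRepository = in.readOptionalString();
        } else {
            // Older senders never wrote these fields; use pre-2.9 defaults.
            shallowCopy = false;
            sourceRepository = null;
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_2_9_0)) {
            out.writeBoolean(shallowCopy);
            out.writeOptionalString(sourceRepository);
        }
        // Nothing is written for older receivers, matching what their readers expect.
    }
}
```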
*/ @@ -314,11 +310,8 @@ public void validate(final Boolean value) {} @Override public void validate(final Boolean value, final Map, Object> settings) { - final Object clusterSettingReplicationType = settings.get(CLUSTER_REPLICATION_TYPE_SETTING); final Object replicationType = settings.get(INDEX_REPLICATION_TYPE_SETTING); - if ((replicationType).equals(ReplicationType.SEGMENT) == false - && (clusterSettingReplicationType).equals(ReplicationType.SEGMENT) == false - && value == true) { + if (ReplicationType.SEGMENT.equals(replicationType) == false && value) { throw new IllegalArgumentException( "To enable " + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() @@ -332,7 +325,7 @@ public void validate(final Boolean value, final Map, Object> settings @Override public Iterator> settings() { - final List> settings = List.of(INDEX_REPLICATION_TYPE_SETTING, CLUSTER_REPLICATION_TYPE_SETTING); + final List> settings = List.of(INDEX_REPLICATION_TYPE_SETTING); return settings.iterator(); } }, @@ -449,45 +442,6 @@ public Iterator> settings() { Property.Final ); - public static final Setting INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( - SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL, - TimeValue.timeValueMillis(100), - TimeValue.timeValueMillis(50), - new Setting.Validator<>() { - - @Override - public void validate(final TimeValue value) {} - - @Override - public void validate(final TimeValue value, final Map, Object> settings) { - if (value == null) { - throw new IllegalArgumentException( - "Setting " + SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL + " should be provided with a valid time value" - ); - } else { - final Boolean isRemoteTranslogStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING); - if (isRemoteTranslogStoreEnabled == null || isRemoteTranslogStoreEnabled == false) { - throw new IllegalArgumentException( - "Setting " - + SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL - + " can only be set when " - + SETTING_REMOTE_TRANSLOG_STORE_ENABLED - + " is set to true" - ); - } - } - } - - @Override - public Iterator> settings() { - final List> settings = Collections.singletonList(INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING); - return settings.iterator(); - } - }, - Property.IndexScope, - Property.Final - ); - public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 0b13b5273ba34..1fb73943c0657 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -72,6 +72,7 @@ import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -136,6 +137,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; +import static 
org.opensearch.common.util.FeatureFlags.REMOTE_STORE; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; @@ -938,7 +940,13 @@ static Settings aggregateIndexSettings( private static void updateReplicationStrategy(Settings.Builder settingsBuilder, Settings requestSettings, Settings clusterSettings) { if (CLUSTER_REPLICATION_TYPE_SETTING.exists(clusterSettings) && INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings) == false) { settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings)); + return; } + if (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings) == true) { + settingsBuilder.put(SETTING_REPLICATION_TYPE, INDEX_REPLICATION_TYPE_SETTING.get(requestSettings)); + return; + } + settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.getDefault(clusterSettings)); } /** @@ -954,20 +962,19 @@ private static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, return; } - // Verify REPLICATION_TYPE cluster level setting is not conflicting with Remote Store - if (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings) == false - && CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings).equals(ReplicationType.DOCUMENT)) { + // Verify index has replication type as SEGMENT + if (ReplicationType.DOCUMENT.equals(ReplicationType.parseString(settingsBuilder.get(SETTING_REPLICATION_TYPE)))) { throw new IllegalArgumentException( "Cannot enable [" + SETTING_REMOTE_STORE_ENABLED + "] when [" - + CLUSTER_REPLICATION_TYPE_SETTING.getKey() + + SETTING_REPLICATION_TYPE + "] is " + ReplicationType.DOCUMENT ); } - settingsBuilder.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).put(SETTING_REMOTE_STORE_ENABLED, true); + settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true); String remoteStoreRepo; if (Objects.equals(requestSettings.get(INDEX_REMOTE_STORE_ENABLED_SETTING.getKey()), "true")) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index ef86eb31e2817..41e06ac2a8b24 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -257,9 +258,11 @@ public static class SnapshotRecoverySource extends RecoverySource { private final IndexId index; private final Version version; private final boolean isSearchableSnapshot; + private final boolean remoteStoreIndexShallowCopy; + private final String sourceRemoteStoreRepository; public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version version, IndexId indexId) { - this(restoreUUID, snapshot, version, indexId, false); + this(restoreUUID, snapshot, version, indexId, false, false, null); } public SnapshotRecoverySource( @@ -267,13 +270,17 @@ public SnapshotRecoverySource( Snapshot snapshot, Version version, IndexId indexId, - boolean isSearchableSnapshot + boolean isSearchableSnapshot, + boolean remoteStoreIndexShallowCopy, + @Nullable 
String sourceRemoteStoreRepository ) { this.restoreUUID = restoreUUID; this.snapshot = Objects.requireNonNull(snapshot); this.version = Objects.requireNonNull(version); this.index = Objects.requireNonNull(indexId); this.isSearchableSnapshot = isSearchableSnapshot; + this.remoteStoreIndexShallowCopy = remoteStoreIndexShallowCopy; + this.sourceRemoteStoreRepository = sourceRemoteStoreRepository; } SnapshotRecoverySource(StreamInput in) throws IOException { @@ -286,6 +293,13 @@ public SnapshotRecoverySource( } else { isSearchableSnapshot = false; } + if (in.getVersion().onOrAfter(Version.V_2_9_0)) { + remoteStoreIndexShallowCopy = in.readBoolean(); + sourceRemoteStoreRepository = in.readOptionalString(); + } else { + remoteStoreIndexShallowCopy = false; + sourceRemoteStoreRepository = null; + } } public String restoreUUID() { @@ -314,6 +328,14 @@ public boolean isSearchableSnapshot() { return isSearchableSnapshot; } + public String sourceRemoteStoreRepository() { + return sourceRemoteStoreRepository; + } + + public boolean remoteStoreIndexShallowCopy() { + return remoteStoreIndexShallowCopy; + } + @Override protected void writeAdditionalFields(StreamOutput out) throws IOException { out.writeString(restoreUUID); @@ -323,6 +345,10 @@ protected void writeAdditionalFields(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeBoolean(isSearchableSnapshot); } + if (out.getVersion().onOrAfter(Version.V_2_9_0)) { + out.writeBoolean(remoteStoreIndexShallowCopy); + out.writeOptionalString(sourceRemoteStoreRepository); + } } @Override @@ -337,7 +363,9 @@ public void addAdditionalFields(XContentBuilder builder, ToXContent.Params param .field("version", version.toString()) .field("index", index.getName()) .field("restoreUUID", restoreUUID) - .field("isSearchableSnapshot", isSearchableSnapshot); + .field("isSearchableSnapshot", isSearchableSnapshot) + .field("remoteStoreIndexShallowCopy", remoteStoreIndexShallowCopy) + .field("sourceRemoteStoreRepository", sourceRemoteStoreRepository); } @Override @@ -359,12 +387,24 @@ public boolean equals(Object o) { && snapshot.equals(that.snapshot) && index.equals(that.index) && version.equals(that.version) - && isSearchableSnapshot == that.isSearchableSnapshot; + && isSearchableSnapshot == that.isSearchableSnapshot + && remoteStoreIndexShallowCopy == that.remoteStoreIndexShallowCopy + && sourceRemoteStoreRepository != null + ? 
sourceRemoteStoreRepository.equals(that.sourceRemoteStoreRepository) + : that.sourceRemoteStoreRepository == null; } @Override public int hashCode() { - return Objects.hash(restoreUUID, snapshot, index, version, isSearchableSnapshot); + return Objects.hash( + restoreUUID, + snapshot, + index, + version, + isSearchableSnapshot, + remoteStoreIndexShallowCopy, + sourceRemoteStoreRepository + ); } } diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 9712fdbfbe8ec..ffc6f81490c27 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -291,14 +291,14 @@ private void runTasks(TaskInputs taskInputs) { return; } - final long computationStartTime = threadPool.relativeTimeInMillis(); + final long computationStartTime = threadPool.preciseRelativeTimeInNanos(); final TaskOutputs taskOutputs = calculateTaskOutputs(taskInputs, previousClusterState); taskOutputs.notifyFailedTasks(); final TimeValue computationTime = getTimeSince(computationStartTime); logExecutionTime(computationTime, "compute cluster state update", summary); if (taskOutputs.clusterStateUnchanged()) { - final long notificationStartTime = threadPool.relativeTimeInMillis(); + final long notificationStartTime = threadPool.preciseRelativeTimeInNanos(); taskOutputs.notifySuccessfulTasksOnUnchangedClusterState(); final TimeValue executionTime = getTimeSince(notificationStartTime); logExecutionTime(executionTime, "notify listeners on unchanged cluster state", summary); @@ -309,7 +309,7 @@ private void runTasks(TaskInputs taskInputs) { } else { logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), summary); } - final long publicationStartTime = threadPool.relativeTimeInMillis(); + final long publicationStartTime = threadPool.preciseRelativeTimeInNanos(); try { ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(summary, newClusterState, previousClusterState); // new cluster state, notify all listeners @@ -335,8 +335,8 @@ private void runTasks(TaskInputs taskInputs) { } } - private TimeValue getTimeSince(long startTimeMillis) { - return TimeValue.timeValueMillis(Math.max(0, threadPool.relativeTimeInMillis() - startTimeMillis)); + private TimeValue getTimeSince(long startTimeNanos) { + return TimeValue.timeValueMillis(TimeValue.nsecToMSec(threadPool.preciseRelativeTimeInNanos() - startTimeNanos)); } protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeMillis) { @@ -358,7 +358,7 @@ protected boolean blockingAllowed() { } void onPublicationSuccess(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs) { - final long notificationStartTime = threadPool.relativeTimeInMillis(); + final long notificationStartTime = threadPool.preciseRelativeTimeInNanos(); taskOutputs.processedDifferentClusterState(clusterChangedEvent.previousState(), clusterChangedEvent.state()); try { diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index ac38768c9f3d3..e626824e7e271 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -32,10 +32,14 @@ package org.opensearch.common.blobstore; +import org.opensearch.action.ActionListener; + 
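The MasterService hunk above swaps the millisecond-granularity relative clock for a nanosecond one: sub-millisecond phases previously rounded down to zero. A plain-JDK illustration of the difference (no ThreadPool involved; the spin loop merely simulates a short cluster-state phase):

```java
import java.util.concurrent.TimeUnit;

public class ElapsedDemo {
    public static void main(String[] args) {
        long startNanos = System.nanoTime(); // monotonic, unaffected by wall-clock adjustments
        spinFor(TimeUnit.MICROSECONDS.toNanos(200)); // simulate a ~200µs phase
        long elapsedNanos = System.nanoTime() - startNanos;
        // A millisecond-granularity clock reports 0 for this interval; keeping nanoseconds
        // until the final conversion (as getTimeSince now does via TimeValue.nsecToMSec)
        // preserves the measurement.
        System.out.printf("elapsed: %d µs (%d ms)%n",
            TimeUnit.NANOSECONDS.toMicros(elapsedNanos),
            TimeUnit.NANOSECONDS.toMillis(elapsedNanos));
    }

    private static void spinFor(long nanos) {
        long deadline = System.nanoTime() + nanos;
        while (System.nanoTime() < deadline) {
            // busy-wait; good enough for a demo
        }
    }
}
```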
import java.io.IOException; import java.io.InputStream; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; +import java.util.ArrayList; +import java.util.Comparator; import java.util.List; import java.util.Map; @@ -191,4 +195,47 @@ default long readBlobPreferredLength() { * @throws IOException if there were any failures in reading from the blob container. */ Map listBlobsByPrefix(String blobNamePrefix) throws IOException; + + /** + * The type representing sort order of blob names + */ + enum BlobNameSortOrder { + + LEXICOGRAPHIC(Comparator.comparing(BlobMetadata::name)); + + final Comparator comparator; + + public Comparator comparator() { + return comparator; + } + + BlobNameSortOrder(final Comparator comparator) { + this.comparator = comparator; + } + } + + /** + * Lists all blobs in the container that match the specified prefix in lexicographic order + * @param blobNamePrefix The prefix to match against blob names in the container. + * @param limit Limits the result size to min(limit, number of keys) + * @param blobNameSortOrder Comparator to sort keys with + * @param listener the listener to be notified upon request completion + */ + default void listBlobsByPrefixInSortedOrder( + String blobNamePrefix, + int limit, + BlobNameSortOrder blobNameSortOrder, + ActionListener> listener + ) { + if (limit < 0) { + throw new IllegalArgumentException("limit should not be a negative value"); + } + try { + List blobNames = new ArrayList<>(listBlobsByPrefix(blobNamePrefix).values()); + blobNames.sort(blobNameSortOrder.comparator()); + listener.onResponse(blobNames.subList(0, Math.min(blobNames.size(), limit))); + } catch (Exception e) { + listener.onFailure(e); + } + } } diff --git a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java index 79c04d431e97b..33f12c8cb42d3 100644 --- a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java @@ -49,6 +49,11 @@ public void inc(long n) { sum.add(n); } + public void add(MeanMetric other) { + counter.add(other.counter.sum()); + sum.add(other.sum.sum()); + } + public void dec(long n) { counter.decrement(); sum.add(-n); diff --git a/server/src/main/java/org/opensearch/common/metrics/OperationMetrics.java b/server/src/main/java/org/opensearch/common/metrics/OperationMetrics.java new file mode 100644 index 0000000000000..97fbbc2ce5cde --- /dev/null +++ b/server/src/main/java/org/opensearch/common/metrics/OperationMetrics.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.metrics; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Mutable tracker of a repeated operation. + * + * @opensearch.internal + */ +public class OperationMetrics { + /** + * The mean time it takes to complete the measured item. + */ + private final MeanMetric time = new MeanMetric(); + /** + * The current count of things being measured. + * Useful when aggregating multiple metrics to see how many things are in flight. + */ + private final AtomicLong current = new AtomicLong(); + /** + * The non-decreasing count of failures + */ + private final CounterMetric failed = new CounterMetric(); + + /** + * Invoked before the given operation begins. 
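A hedged caller-side sketch for the new listBlobsByPrefixInSortedOrder default method above; `container` can be any BlobContainer implementation, and the printing is purely illustrative:

```java
import java.util.List;

import org.opensearch.action.ActionListener;
import org.opensearch.common.blobstore.BlobContainer;
import org.opensearch.common.blobstore.BlobMetadata;

final class SortedBlobLookup {
    // Fetch up to `limit` blobs whose names start with the prefix, already sorted
    // lexicographically by the container (or by the default implementation above).
    static void fetchSorted(BlobContainer container, String prefix, int limit) {
        container.listBlobsByPrefixInSortedOrder(
            prefix,
            limit,
            BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC,
            new ActionListener<List<BlobMetadata>>() {
                @Override
                public void onResponse(List<BlobMetadata> blobs) {
                    blobs.forEach(b -> System.out.println(b.name() + " (" + b.length() + " bytes)"));
                }

                @Override
                public void onFailure(Exception e) {
                    e.printStackTrace(); // a real caller would surface this upstream
                }
            }
        );
    }
}
```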
+ */ + public void before() { + current.incrementAndGet(); + } + + /** + * Invoked upon completion (success or failure) of the given operation + * @param currentTime elapsed time of the operation + */ + public void after(long currentTime) { + current.decrementAndGet(); + time.inc(currentTime); + } + + /** + * Invoked upon failure of the operation. + */ + public void failed() { + failed.inc(); + } + + public void add(OperationMetrics other) { + // Don't try copying over current, since in-flight requests will be linked to the existing metrics instance. + failed.inc(other.failed.count()); + time.add(other.time); + } + + /** + * @return an immutable snapshot of the current metric values. + */ + public OperationStats createStats() { + return new OperationStats(time.count(), time.sum(), current.get(), failed.count()); + } +} diff --git a/server/src/main/java/org/opensearch/common/metrics/OperationStats.java b/server/src/main/java/org/opensearch/common/metrics/OperationStats.java new file mode 100644 index 0000000000000..a820f848393bb --- /dev/null +++ b/server/src/main/java/org/opensearch/common/metrics/OperationStats.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.metrics; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +/** + * An immutable representation of a {@link OperationMetrics} + */ +public class OperationStats implements Writeable, ToXContentFragment { + private final long count; + private final long totalTimeInMillis; + private final long current; + private final long failedCount; + + public OperationStats(long count, long totalTimeInMillis, long current, long failedCount) { + this.count = count; + this.totalTimeInMillis = totalTimeInMillis; + this.current = current; + this.failedCount = failedCount; + } + + /** + * Read from a stream. + */ + public OperationStats(StreamInput in) throws IOException { + count = in.readVLong(); + totalTimeInMillis = in.readVLong(); + current = in.readVLong(); + failedCount = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(count); + out.writeVLong(totalTimeInMillis); + out.writeVLong(current); + out.writeVLong(failedCount); + } + + /** + * @return The total number of executed operations. + */ + public long getCount() { + return count; + } + + /** + * @return The total time spent of in millis. + */ + public long getTotalTimeInMillis() { + return totalTimeInMillis; + } + + /** + * @return The total number of operations currently executing. + */ + public long getCurrent() { + return current; + } + + /** + * @return The total number of operations that have failed. 
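A usage sketch for the new OperationMetrics above, showing the intended before()/after()/failed() discipline around a single operation; the surrounding service class is hypothetical:

```java
import java.util.concurrent.TimeUnit;

import org.opensearch.common.metrics.OperationMetrics;
import org.opensearch.common.metrics.OperationStats;

class MeteredService {
    private final OperationMetrics metrics = new OperationMetrics();

    void execute(Runnable operation) {
        long startNanos = System.nanoTime();
        metrics.before(); // one more operation in flight
        try {
            operation.run();
        } catch (RuntimeException e) {
            metrics.failed(); // failures accumulate in a separate counter
            throw e;
        } finally {
            // after() decrements "current" and records the elapsed time; OperationStats
            // reports the accumulated time as milliseconds, so convert before recording.
            metrics.after(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        }
    }

    OperationStats stats() {
        return metrics.createStats(); // immutable snapshot
    }
}
```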
+ */ + public long getFailedCount() { + return failedCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field("count", count) + .humanReadableField("time_in_millis", "time", new TimeValue(totalTimeInMillis, TimeUnit.MILLISECONDS)) + .field("current", current) + .field("failed", failedCount); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + OperationStats that = (OperationStats) o; + return Objects.equals(count, that.count) + && Objects.equals(totalTimeInMillis, that.totalTimeInMillis) + && Objects.equals(failedCount, that.failedCount) + && Objects.equals(current, that.current); + } + + @Override + public int hashCode() { + return Objects.hash(count, totalTimeInMillis, failedCount, current); + } +} diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index fe1d292dbd8f6..d70ea16cf5fdd 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -52,6 +52,7 @@ import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.consumer.TopNSearchTasksLogger; +import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.indices.close.TransportCloseIndexAction; @@ -675,6 +676,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING ), List.of(FeatureFlags.CONCURRENT_SEGMENT_SEARCH), - List.of(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + List.of(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING), + List.of(FeatureFlags.TELEMETRY), + List.of(TelemetrySettings.TRACER_ENABLED_SETTING) ); } diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 19a5808579d50..dae66c79c63ec 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -39,7 +39,8 @@ protected FeatureFlagSettings( FeatureFlags.EXTENSIONS_SETTING, FeatureFlags.IDENTITY_SETTING, FeatureFlags.SEARCH_PIPELINE_SETTING, - FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING + FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, + FeatureFlags.TELEMETRY_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 95c0f3b55222f..08d8199afface 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -205,6 +205,9 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME, IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID, + // Settings for remote translog + IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + // validate that built-in similarities don't get redefined 
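The ClusterSettings change above extends the existing pattern of pairing a feature-flag key with the list of settings it gates. A small sketch of the filtering step that pattern implies, assuming only FeatureFlags.isEnabled(String) from this patch (the helper class itself is illustrative):

```java
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.opensearch.common.settings.Setting;
import org.opensearch.common.util.FeatureFlags;

final class FlagGatedSettings {
    // Flatten a flag -> settings map down to only the settings whose flag is enabled,
    // e.g. TelemetrySettings.TRACER_ENABLED_SETTING appears only when TELEMETRY is on.
    static List<Setting<?>> enabledExperimentalSettings(Map<String, List<Setting<?>>> flagToSettings) {
        return flagToSettings.entrySet()
            .stream()
            .filter(entry -> FeatureFlags.isEnabled(entry.getKey()))
            .flatMap(entry -> entry.getValue().stream())
            .collect(Collectors.toList());
    }
}
```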
Setting.groupSetting("index.similarity.", (s) -> { Map groups = s.getAsGroups(); @@ -233,8 +236,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING, - IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING, - IndexMetadata.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING + IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING ), FeatureFlags.CONCURRENT_SEGMENT_SEARCH, List.of(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING) diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index a26b4006bb31e..2b4dab616d00f 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -63,6 +63,11 @@ public class FeatureFlags { */ public static final String CONCURRENT_SEGMENT_SEARCH = "opensearch.experimental.feature.concurrent_segment_search.enabled"; + /** + * Gates the functionality of telemetry framework. + */ + public static final String TELEMETRY = "opensearch.experimental.feature.telemetry.enabled"; + /** * Should store the settings from opensearch.yml. */ @@ -105,6 +110,8 @@ public static boolean isEnabled(String featureFlagName) { public static final Setting IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); + public static final Setting TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); + public static final Setting CONCURRENT_SEGMENT_SEARCH_SETTING = Setting.boolSetting( CONCURRENT_SEGMENT_SEARCH, false, diff --git a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java new file mode 100644 index 0000000000000..5789b47423c1d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java @@ -0,0 +1,307 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.apache.lucene.util.hppc.BitMixer; +import org.opensearch.common.lease.Releasable; + +/** + * Specialized hash table implementation that maps a (primitive) long to long. + * + *

+ * It organizes itself by moving keys around dynamically in order to reduce the + * longest probe sequence length (PSL), which makes lookups faster as keys are likely to + * be found in the same CPU cache line. It also optimizes lookups for recently added keys, + * making it useful for aggregations where keys are correlated across consecutive hits. + * + *

+ * This class is not thread-safe. + * + * @opensearch.internal + */ +public class ReorganizingLongHash implements Releasable { + private static final long MAX_CAPACITY = 1L << 32; + private static final long DEFAULT_INITIAL_CAPACITY = 32; + private static final float DEFAULT_LOAD_FACTOR = 0.6f; + + /** + * Maximum load factor after which the capacity is doubled. + */ + private final float loadFactor; + + /** + * Utility class to allocate recyclable arrays. + */ + private final BigArrays bigArrays; + + /** + * Current capacity of the hash table. This must be a power of two so that the hash table slot + * can be identified quickly using bitmasks, thus avoiding expensive modulo or integer division. + */ + private long capacity; + + /** + * Bitmask to identify the hash table slot from a key's hash. + */ + private long mask; + + /** + * Size threshold after which the hash table needs to be doubled in capacity. + */ + private long grow; + + /** + * Current size of the hash table. + */ + private long size; + + /** + * Underlying array to store the hash table values. + * + *

+ * Each hash table value (64-bit) uses the following byte packing strategy: + *

+     * |=========|===============|================|================================|
+     * | Discard | PSL           | Fingerprint    | Ordinal                        |
+     * |    -    |---------------|----------------|--------------------------------|
+     * | 1 bit   | 15 bits       | 16 bits        | 32 bits                        |
+     * |=========|===============|================|================================|
+     * 
+ * + *

+ * This allows us to encode and manipulate additional information in the hash table + * itself without having to look elsewhere in the memory, which is much slower. + * + *

+ * Terminology: table[index] = value = (discard | psl | fingerprint | ordinal) + */ + private LongArray table; + + /** + * Underlying array to store the keys. + * + *

+ * Terminology: keys[ordinal] = key + */ + private LongArray keys; + + /** + * Bitmasks to manipulate the hash table values. + */ + private static final long MASK_ORDINAL = 0x00000000FFFFFFFFL; // extract ordinal + private static final long MASK_FINGERPRINT = 0x0000FFFF00000000L; // extract fingerprint + private static final long MASK_PSL = 0x7FFF000000000000L; // extract PSL + private static final long INCR_PSL = 0x0001000000000000L; // increment PSL by one + + public ReorganizingLongHash(final BigArrays bigArrays) { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, bigArrays); + } + + public ReorganizingLongHash(final long initialCapacity, final float loadFactor, final BigArrays bigArrays) { + assert initialCapacity > 0 : "initial capacity must be greater than 0"; + assert loadFactor > 0 && loadFactor < 1 : "load factor must be between 0 and 1"; + + this.bigArrays = bigArrays; + this.loadFactor = loadFactor; + + capacity = nextPowerOfTwo((long) (initialCapacity / loadFactor)); + mask = capacity - 1; + grow = (long) (capacity * loadFactor); + size = 0; + + table = bigArrays.newLongArray(capacity, false); + table.fill(0, capacity, -1); // -1 represents an empty slot + keys = bigArrays.newLongArray(initialCapacity, false); + } + + /** + * Adds the given key to the hash table and returns its ordinal. + * If the key exists already, it returns (-1 - ordinal). + */ + public long add(final long key) { + final long ordinal = find(key); + if (ordinal != -1) { + return -1 - ordinal; + } + + if (size >= grow) { + grow(); + } + + return insert(key); + } + + /** + * Returns the key associated with the given ordinal. + * The result is undefined for an unused ordinal. + */ + public long get(final long ordinal) { + return keys.get(ordinal); + } + + /** + * Returns the ordinal associated with the given key, or -1 if the key doesn't exist. + * + *

+ * Using the 64-bit hash value, up to 32 least significant bits (LSB) are used to identify the + * home slot in the hash table, and an additional 16 bits are used to identify the fingerprint. + * The fingerprint further increases the entropy and reduces the number of false lookups in the + * keys' table during equality checks, which is expensive due to an uncorrelated memory lookup. + * + *

+ * Total entropy bits = 16 + log2(capacity) + * + *

+ * Linear probing starts from the home slot, until a match or an empty slot is found. + * Values are first checked using their fingerprint (to reduce false positives), then verified + * in the keys' table using an equality check. + */ + public long find(final long key) { + final long hash = hash(key); + final long fingerprint = hash & MASK_FINGERPRINT; + + for (long idx = hash & mask, value, ordinal;; idx = (idx + 1) & mask) { + if ((value = table.get(idx)) == -1) { + return -1; + } else if (((value & MASK_FINGERPRINT) == fingerprint) && (keys.get((ordinal = (value & MASK_ORDINAL))) == key)) { + return ordinal; + } + } + } + + /** + * Returns the number of mappings in this hash table. + */ + public long size() { + return size; + } + + /** + * Inserts the given key in the hash table and returns its ordinal. + * + *

+ * Inspired by Robin Hood Hashing (RHH): if the PSL for the existing value is less than the PSL + * for the value being inserted, swap the two values and keep going. Values that were inserted + * early and thus "lucked out" on their PSLs will gradually be moved away from their preferred + * slot as new values come in that could make better use of that place in the table. It evens out + * the PSLs across the board and reduces the longest PSL dramatically. + * + *

+ * A lower variance is better because, with modern CPU architectures, a PSL of 1 isn't much + * faster than a PSL of 3, because the main cost is fetching the cache line. The ideal hash + * table layout is the one where all values have equal PSLs, and that PSL fits within one cache line. + * + *

+ * The expected longest PSL for a full table: log(N) + * + *

+ * Our implementation has a slight variation on top of it: by loosening the guarantees provided + * by RHH, we can improve the performance on correlated lookups (such as aggregating on repeated + * timestamps) by moving the "recent" keys closer to their home slot, and eventually converging + * to the ideal hash table layout defined by RHH. + */ + private long insert(final long key) { + final long hash = hash(key); + final long fingerprint = hash & MASK_FINGERPRINT; + + // The ideal home slot for the given key. + long idx = hash & mask; + + // The value yet to find an empty slot (candidate). + long value = fingerprint | size; + + // The existing value at idx. + long existingValue; + + // Always set the newly inserted key at its ideal home slot, even if it doesn't conform + // to the RHH scheme (yet). This will ensure subsequent correlated lookups are fast due + // to no additional probing. When another insertion causes this value to be displaced, it + // will eventually be placed at an appropriate location defined by the RHH scheme. + if ((value = table.set(idx, value)) == -1) { + // The ideal home slot was already empty; append the key and return early. + return append(key); + } + + // Find an alternative slot for the displaced value such that the longest PSL is minimized. + do { + idx = (idx + 1) & mask; + value += INCR_PSL; + + if ((existingValue = table.get(idx)) == -1) { + // Empty slot; insert the candidate value here. + table.set(idx, value); + return append(key); + } else if ((existingValue & MASK_PSL) <= (value & MASK_PSL)) { + // Existing value is "richer" than the candidate value at this index; + // swap and find an alternative slot for the displaced value. + // In the case of a tie, the candidate value (i.e. the recent value) is chosen as + // the winner and kept closer to its ideal home slot in order to speed up + // correlated lookups. + value = table.set(idx, value); + } + } while (true); + } + + /** + * Appends the key in the keys' table. + */ + private long append(final long key) { + keys = bigArrays.grow(keys, size + 1); + keys.set(size, key); + return size++; + } + + /** + * Returns the hash for the given key. + * Visible for unit-tests. + */ + long hash(final long key) { + return BitMixer.mix64(key); + } + + /** + * Returns the underlying hash table. + * Visible for unit-tests. + */ + LongArray getTable() { + return table; + } + + /** + * Grows the hash table by doubling its capacity and reinserting the keys. + */ + private void grow() { + // Ensure that the hash table doesn't grow too large. + // This implicitly also ensures that the ordinals are no larger than 2^32, thus, + // preventing them from polluting other bits (PSL/fingerprint) in the hash table values. 
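A hedged usage sketch for ReorganizingLongHash as defined above: add() hands back a fresh ordinal for a new key and (-1 - ordinal) for an existing one, which lets a caller count repeated keys without a second lookup (the demo class is illustrative):

```java
import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.ReorganizingLongHash;

public class LongHashDemo {
    public static void main(String[] args) {
        long[] timestamps = { 1000, 1000, 1000, 2000, 1000, 2000 };
        try (ReorganizingLongHash hash = new ReorganizingLongHash(BigArrays.NON_RECYCLING_INSTANCE)) {
            long[] counts = new long[timestamps.length];
            for (long t : timestamps) {
                long ordinal = hash.add(t);
                if (ordinal < 0) {
                    ordinal = -1 - ordinal; // key already present; decode its ordinal
                }
                counts[(int) ordinal]++;
            }
            // Ordinals are dense and start at 0, so they index the counts array directly.
            for (long ordinal = 0; ordinal < hash.size(); ordinal++) {
                System.out.println(hash.get(ordinal) + " -> " + counts[(int) ordinal]);
            }
        }
    }
}
```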
+ assert capacity < MAX_CAPACITY : "hash table already at the max capacity"; + + final long oldSize = size; + capacity <<= 1; + mask = capacity - 1; + size = 0; + grow = (long) (capacity * loadFactor); + table = bigArrays.resize(table, capacity); + table.fill(0, capacity, -1); + + for (long ordinal = 0; ordinal < oldSize; ordinal++) { + insert(keys.get(ordinal)); + } + } + + @Override + public void close() { + table.close(); + keys.close(); + } + + private static long nextPowerOfTwo(final long value) { + return Math.max(1, Long.highestOneBit(value - 1) << 1); + } +} diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java b/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java index f3d909e1c92bc..7079aa705d126 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java @@ -17,10 +17,11 @@ import java.util.List; import java.util.Objects; import java.util.function.Consumer; +import java.util.function.Supplier; /** * A variant of {@link AsyncIOProcessor} that allows to batch and buffer processing items at every - * {@link BufferedAsyncIOProcessor#bufferInterval} in a separate threadpool. + * {@link BufferedAsyncIOProcessor#getBufferInterval()} in a separate threadpool. *
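Switching BufferedAsyncIOProcessor from a fixed TimeValue to a Supplier<TimeValue> means the interval is re-read on every scheduling decision, so it can track a dynamic setting without rebuilding the processor. A minimal sketch of such wiring (the holder class and the setting hookup are hypothetical):

```java
import java.util.function.Supplier;

import org.opensearch.common.unit.TimeValue;

class BufferIntervalHolder {
    private volatile TimeValue bufferInterval = TimeValue.timeValueMillis(100);

    // Registered as a dynamic-setting update consumer elsewhere (hypothetical wiring).
    void onSettingUpdate(TimeValue newInterval) {
        bufferInterval = newInterval;
    }

    // Handed to the BufferedAsyncIOProcessor constructor; each call to
    // getBufferInterval() re-reads the current value, so updates take effect
    // on the next scheduling decision.
    Supplier<TimeValue> supplier() {
        return () -> bufferInterval;
    }
}
```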

* Requests are buffered till processor thread calls @{link drainAndProcessAndRelease} after bufferInterval. * If more requests are enqueued between invocations of drainAndProcessAndRelease, another processor thread @@ -32,18 +33,18 @@ public abstract class BufferedAsyncIOProcessor extends AsyncIOProcessor { private final ThreadPool threadpool; - private final TimeValue bufferInterval; + private final Supplier bufferIntervalSupplier; protected BufferedAsyncIOProcessor( Logger logger, int queueSize, ThreadContext threadContext, ThreadPool threadpool, - TimeValue bufferInterval + Supplier bufferIntervalSupplier ) { super(logger, queueSize, threadContext); this.threadpool = threadpool; - this.bufferInterval = bufferInterval; + this.bufferIntervalSupplier = bufferIntervalSupplier; } @Override @@ -81,11 +82,12 @@ private void process() { } private TimeValue getBufferInterval() { + long bufferInterval = bufferIntervalSupplier.get().getNanos(); long timeSinceLastRunStartInNS = System.nanoTime() - getLastRunStartTimeInNs(); - if (timeSinceLastRunStartInNS >= bufferInterval.getNanos()) { + if (timeSinceLastRunStartInNS >= bufferInterval) { return TimeValue.ZERO; } - return TimeValue.timeValueNanos(bufferInterval.getNanos() - timeSinceLastRunStartInNS); + return TimeValue.timeValueNanos(bufferInterval - timeSinceLastRunStartInNS); } protected abstract String getBufferProcessThreadPoolName(); diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 5b9a77c75dddb..025fb7a36b684 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -45,6 +45,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.http.HttpTransportSettings; import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskThreadContextStatePropagator; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -56,7 +57,9 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.BiConsumer; import java.util.function.BinaryOperator; import java.util.function.Function; @@ -66,7 +69,6 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with @@ -113,6 +115,7 @@ public final class ThreadContext implements Writeable { private final ThreadLocal threadLocal; private final int maxWarningHeaderCount; private final long maxWarningHeaderSize; + private final List propagators; /** * Creates a new ThreadContext instance @@ -123,6 +126,15 @@ public ThreadContext(Settings settings) { this.threadLocal = ThreadLocal.withInitial(() -> DEFAULT_CONTEXT); this.maxWarningHeaderCount = SETTING_HTTP_MAX_WARNING_HEADER_COUNT.get(settings); this.maxWarningHeaderSize = SETTING_HTTP_MAX_WARNING_HEADER_SIZE.get(settings).getBytes(); + this.propagators = new CopyOnWriteArrayList<>(List.of(new TaskThreadContextStatePropagator())); + } + + public void registerThreadContextStatePropagator(final ThreadContextStatePropagator propagator) { + 
propagators.add(Objects.requireNonNull(propagator)); + } + + public void unregisterThreadContextStatePropagator(final ThreadContextStatePropagator propagator) { + propagators.remove(Objects.requireNonNull(propagator)); } /** @@ -147,8 +159,9 @@ public StoredContext stashContext() { ); } - if (context.transientHeaders.containsKey(TASK_ID)) { - threadContextStruct = threadContextStruct.putTransient(TASK_ID, context.transientHeaders.get(TASK_ID)); + final Map transientHeaders = propagateTransients(context.transientHeaders); + if (!transientHeaders.isEmpty()) { + threadContextStruct = threadContextStruct.putTransient(transientHeaders); } threadLocal.set(threadContextStruct); @@ -166,7 +179,10 @@ public StoredContext stashContext() { */ public Writeable captureAsWriteable() { final ThreadContextStruct context = threadLocal.get(); - return out -> context.writeTo(out, defaultHeader); + return out -> { + final Map propagatedHeaders = propagateHeaders(context.transientHeaders); + context.writeTo(out, defaultHeader, propagatedHeaders); + }; } /** @@ -224,17 +240,24 @@ public StoredContext newStoredContext(boolean preserveResponseHeaders) { */ public StoredContext newStoredContext(boolean preserveResponseHeaders, Collection transientHeadersToClear) { final ThreadContextStruct originalContext = threadLocal.get(); + final Map newTransientHeaders = new HashMap<>(originalContext.transientHeaders); + + boolean transientHeadersModified = false; + final Map transientHeaders = propagateTransients(originalContext.transientHeaders); + if (!transientHeaders.isEmpty()) { + newTransientHeaders.putAll(transientHeaders); + transientHeadersModified = true; + } + // clear specific transient headers from the current context - Map newTransientHeaders = null; for (String transientHeaderToClear : transientHeadersToClear) { - if (originalContext.transientHeaders.containsKey(transientHeaderToClear)) { - if (newTransientHeaders == null) { - newTransientHeaders = new HashMap<>(originalContext.transientHeaders); - } + if (newTransientHeaders.containsKey(transientHeaderToClear)) { newTransientHeaders.remove(transientHeaderToClear); + transientHeadersModified = true; } } - if (newTransientHeaders != null) { + + if (transientHeadersModified == true) { ThreadContextStruct threadContextStruct = new ThreadContextStruct( originalContext.requestHeaders, originalContext.responseHeaders, @@ -246,6 +269,7 @@ public StoredContext newStoredContext(boolean preserveResponseHeaders, Collectio } // this is the context when this method returns final ThreadContextStruct newContext = threadLocal.get(); + return () -> { if (preserveResponseHeaders && threadLocal.get() != newContext) { threadLocal.set(originalContext.putResponseHeaders(threadLocal.get().responseHeaders)); @@ -294,7 +318,9 @@ public Supplier wrapRestorable(StoredContext storedContext) { @Override public void writeTo(StreamOutput out) throws IOException { - threadLocal.get().writeTo(out, defaultHeader); + final ThreadContextStruct context = threadLocal.get(); + final Map propagatedHeaders = propagateHeaders(context.transientHeaders); + context.writeTo(out, defaultHeader, propagatedHeaders); } /** @@ -522,6 +548,18 @@ public static Map buildDefaultHeaders(Settings settings) { } } + private Map propagateTransients(Map source) { + final Map transients = new HashMap<>(); + propagators.forEach(p -> transients.putAll(p.transients(source))); + return transients; + } + + private Map propagateHeaders(Map source) { + final Map headers = new HashMap<>(); + propagators.forEach(p -> 
headers.putAll(p.headers(source))); + return headers; + } + private static final class ThreadContextStruct { private static final ThreadContextStruct EMPTY = new ThreadContextStruct( @@ -695,6 +733,14 @@ private ThreadContextStruct putResponse( return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize); } + private ThreadContextStruct putTransient(Map values) { + Map newTransient = new HashMap<>(this.transientHeaders); + for (Map.Entry entry : values.entrySet()) { + putSingleHeader(entry.getKey(), entry.getValue(), newTransient); + } + return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient, isSystemContext); + } + private ThreadContextStruct putTransient(String key, Object value) { Map newTransient = new HashMap<>(this.transientHeaders); putSingleHeader(key, value, newTransient); @@ -709,13 +755,15 @@ private ThreadContextStruct copyHeaders(Iterable> head return putHeaders(newHeaders); } - private void writeTo(StreamOutput out, Map defaultHeaders) throws IOException { + private void writeTo(StreamOutput out, Map defaultHeaders, Map propagatedHeaders) + throws IOException { final Map requestHeaders; - if (defaultHeaders.isEmpty()) { + if (defaultHeaders.isEmpty() && propagatedHeaders.isEmpty()) { requestHeaders = this.requestHeaders; } else { requestHeaders = new HashMap<>(defaultHeaders); requestHeaders.putAll(this.requestHeaders); + requestHeaders.putAll(propagatedHeaders); } out.writeVInt(requestHeaders.size()); diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java new file mode 100644 index 0000000000000..b3fc79c5446db --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util.concurrent; + +import java.util.Map; + +/** + * The propagator for {@link ThreadContext} that helps to carry-over the state from one + * thread to another (tasks, tracing context, etc). + */ +public interface ThreadContextStatePropagator { + /** + * Returns the list of transient headers that needs to be propagated from current context to new thread context. + * @param source current context transient headers + * @return the list of transient headers that needs to be propagated from current context to new thread context + */ + Map transients(Map source); + + /** + * Returns the list of request headers that needs to be propagated from current context to request. 
+ * @param source current context headers + * @return the list of request headers that needs to be propagated from current context to request + */ + Map headers(Map source); +} diff --git a/server/src/main/java/org/opensearch/env/Environment.java b/server/src/main/java/org/opensearch/env/Environment.java index 938bca58c7081..a1e467ad1ba48 100644 --- a/server/src/main/java/org/opensearch/env/Environment.java +++ b/server/src/main/java/org/opensearch/env/Environment.java @@ -311,10 +311,6 @@ public Path pluginsDir() { return pluginsDir; } - public Path extensionDir() { - return extensionsDir; - } - public Path binDir() { return binDir; } diff --git a/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java b/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java index b1c8b655c4c6f..67c56b7f458ff 100644 --- a/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java @@ -15,6 +15,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.SettingsException; +import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.settings.WriteableSetting; import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; @@ -31,6 +32,7 @@ public class AddSettingsUpdateConsumerRequestHandler { private final ClusterService clusterService; private final TransportService transportService; private final String updateSettingsRequestType; + private final SettingsModule settingsModule; /** * Instantiates a new Add Settings Update Consumer Request Handler with the {@link ClusterService} and {@link TransportService} @@ -42,11 +44,13 @@ public class AddSettingsUpdateConsumerRequestHandler { public AddSettingsUpdateConsumerRequestHandler( ClusterService clusterService, TransportService transportService, - String updateSettingsRequestType + String updateSettingsRequestType, + SettingsModule settingsModule ) { this.clusterService = clusterService; this.transportService = transportService; this.updateSettingsRequestType = updateSettingsRequestType; + this.settingsModule = settingsModule; } /** @@ -68,25 +72,53 @@ TransportResponse handleAddSettingsUpdateConsumerRequest(AddSettingsUpdateConsum // Extract setting and type from writeable setting Setting setting = extensionComponentSetting.getSetting(); + + // we need to get the actual setting from nodeSetting or indexsetting maps in SettingsModule + // use conditional based on setting properties + Setting settingForUpdateConsumer = null; + if (setting.hasIndexScope()) { + settingForUpdateConsumer = settingsModule.getIndexScopedSettings().get(setting.getKey()); + } else if (setting.hasNodeScope()) { + settingForUpdateConsumer = settingsModule.getClusterSettings().get(setting.getKey()); + } + // do a null check and throw IllegalArgument exception here if neither index or node scope + if (settingForUpdateConsumer == null) { + throw new IllegalArgumentException("Not index or node scope"); + } + WriteableSetting.SettingType settingType = extensionComponentSetting.getType(); - // Register setting update consumer with callback method to extension - clusterService.getClusterSettings().addSettingsUpdateConsumer(setting, (data) -> { - logger.debug("Sending extension request type: " + updateSettingsRequestType); - UpdateSettingsResponseHandler 
updateSettingsResponseHandler = new UpdateSettingsResponseHandler(); - transportService.sendRequest( - extensionNode, - updateSettingsRequestType, - new UpdateSettingsRequest(settingType, setting, data), - updateSettingsResponseHandler - ); - }); + if (setting.hasIndexScope()) { + // Register setting update consumer with callback method to extension + clusterService.getClusterSettings().addSettingsUpdateConsumer(settingForUpdateConsumer, (data) -> { + logger.debug("Sending extension request type: " + updateSettingsRequestType); + UpdateSettingsResponseHandler updateSettingsResponseHandler = new UpdateSettingsResponseHandler(); + transportService.sendRequest( + extensionNode, + updateSettingsRequestType, + new UpdateSettingsRequest(settingType, setting, data), + updateSettingsResponseHandler + ); + }); + } + if (setting.hasNodeScope()) { + // Register setting update consumer with callback method to extension + clusterService.getClusterSettings().addSettingsUpdateConsumer(settingForUpdateConsumer, (data) -> { + logger.debug("Sending extension request type: " + updateSettingsRequestType); + UpdateSettingsResponseHandler updateSettingsResponseHandler = new UpdateSettingsResponseHandler(); + transportService.sendRequest( + extensionNode, + updateSettingsRequestType, + new UpdateSettingsRequest(settingType, setting, data), + updateSettingsResponseHandler + ); + }); + } } - } catch (SettingsException e) { + } catch (SettingsException | IllegalArgumentException e) { logger.error(e.toString()); status = false; } - return new AcknowledgedResponse(status); } } diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java b/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java index d2106cf8d399c..56c9f0387b13e 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java @@ -25,6 +25,8 @@ public class ExtensionDependency implements Writeable { private String uniqueId; private Version version; + private static final String UNIQUE_ID = "uniqueId"; + private static final String VERSION = "version"; public ExtensionDependency(String uniqueId, Version version) { this.uniqueId = uniqueId; diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index 9d74e8f22d2b1..cb22c8d864b1b 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -9,29 +9,18 @@ package org.opensearch.extensions; import java.io.IOException; -import java.io.InputStream; import java.net.InetAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; -import java.util.Arrays; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionModule; import 
org.opensearch.action.ActionModule.DynamicActionRegistry; @@ -40,7 +29,8 @@ import org.opensearch.cluster.ClusterSettingsResponse; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; -import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.core.common.Strings; +import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsModule; @@ -65,7 +55,6 @@ import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; -import org.yaml.snakeyaml.Yaml; import org.opensearch.env.EnvironmentSettingsResponse; /** @@ -100,7 +89,6 @@ public static enum OpenSearchRequestType { REQUEST_OPENSEARCH_NAMED_WRITEABLE_REGISTRY } - private final Path extensionsPath; private ExtensionTransportActionsHandler extensionTransportActionsHandler; private Map extensionSettingsMap; private Map initializedExtensions; @@ -117,13 +105,11 @@ public static enum OpenSearchRequestType { /** * Instantiate a new ExtensionsManager object to handle requests and responses from extensions. This is called during Node bootstrap. * - * @param extensionsPath Path to a directory containing extensions. - * @param additionalSettings Additional settings to read in from extensions.yml + * @param additionalSettings Additional settings to read in from extension initialization request * @throws IOException If the extensions discovery file is not properly retrieved. */ - public ExtensionsManager(Path extensionsPath, Set> additionalSettings) throws IOException { + public ExtensionsManager(Set> additionalSettings) throws IOException { logger.info("ExtensionsManager initialized"); - this.extensionsPath = extensionsPath; this.initializedExtensions = new HashMap(); this.extensionIdMap = new HashMap(); this.extensionSettingsMap = new HashMap(); @@ -137,12 +123,6 @@ public ExtensionsManager(Path extensionsPath, Set> additionalSettings } this.client = null; this.extensionTransportActionsHandler = null; - - /* - * Now Discover extensions - */ - discover(); - } /** @@ -172,7 +152,8 @@ public void initializeServicesAndRestHandler( this.addSettingsUpdateConsumerRequestHandler = new AddSettingsUpdateConsumerRequestHandler( clusterService, transportService, - REQUEST_EXTENSION_UPDATE_SETTINGS + REQUEST_EXTENSION_UPDATE_SETTINGS, + settingsModule ); this.client = client; this.extensionTransportActionsHandler = new ExtensionTransportActionsHandler( @@ -306,60 +287,42 @@ private void registerRequestHandler(DynamicActionRegistry dynamicActionRegistry) ); } - /* - * Load and populate all extensions + /** + * Loads a single extension + * @param extension The extension to be loaded */ - protected void discover() throws IOException { - logger.info("Loading extensions : {}", extensionsPath); - if (!FileSystemUtils.isAccessibleDirectory(extensionsPath, logger)) { - return; - } + public void loadExtension(Extension extension) throws IOException { + validateExtension(extension); + DiscoveryExtensionNode discoveryExtensionNode = new DiscoveryExtensionNode( + extension.getName(), + extension.getUniqueId(), + new TransportAddress(InetAddress.getByName(extension.getHostAddress()), Integer.parseInt(extension.getPort())), + new HashMap(), + Version.fromString(extension.getOpensearchVersion()), + Version.fromString(extension.getMinimumCompatibleVersion()), + 
extension.getDependencies() + ); + extensionIdMap.put(extension.getUniqueId(), discoveryExtensionNode); + extensionSettingsMap.put(extension.getUniqueId(), extension); + logger.info("Loaded extension with uniqueId " + extension.getUniqueId() + ": " + extension); + } - List extensions = new ArrayList(); - if (Files.exists(extensionsPath.resolve("extensions.yml"))) { - try { - extensions = readFromExtensionsYml(extensionsPath.resolve("extensions.yml")).getExtensions(); - } catch (IOException e) { - throw new IOException("Could not read from extensions.yml", e); - } - for (Extension extension : extensions) { - loadExtension(extension); - } - if (!extensionIdMap.isEmpty()) { - logger.info("Loaded all extensions"); - } - } else { - logger.warn("Extensions.yml file is not present. No extensions will be loaded."); + private void validateField(String fieldName, String value) throws IOException { + if (Strings.isNullOrEmpty(value)) { + throw new IOException("Required field [" + fieldName + "] is missing in the request"); } } - /** - * Loads a single extension - * @param extension The extension to be loaded - */ - private void loadExtension(Extension extension) throws IOException { + private void validateExtension(Extension extension) throws IOException { + validateField("extension name", extension.getName()); + validateField("extension uniqueId", extension.getUniqueId()); + validateField("extension host address", extension.getHostAddress()); + validateField("extension port", extension.getPort()); + validateField("extension version", extension.getVersion()); + validateField("opensearch version", extension.getOpensearchVersion()); + validateField("minimum opensearch version", extension.getMinimumCompatibleVersion()); if (extensionIdMap.containsKey(extension.getUniqueId())) { - logger.info("Duplicate uniqueId " + extension.getUniqueId() + ". Did not load extension: " + extension); - } else { - try { - DiscoveryExtensionNode discoveryExtensionNode = new DiscoveryExtensionNode( - extension.getName(), - extension.getUniqueId(), - new TransportAddress(InetAddress.getByName(extension.getHostAddress()), Integer.parseInt(extension.getPort())), - new HashMap(), - Version.fromString(extension.getOpensearchVersion()), - Version.fromString(extension.getMinimumCompatibleVersion()), - extension.getDependencies() - ); - - extensionIdMap.put(extension.getUniqueId(), discoveryExtensionNode); - extensionSettingsMap.put(extension.getUniqueId(), extension); - logger.info("Loaded extension with uniqueId " + extension.getUniqueId() + ": " + extension); - } catch (OpenSearchException e) { - logger.error("Could not load extension with uniqueId " + extension.getUniqueId() + " due to " + e); - } catch (IllegalArgumentException e) { - throw e; - } + throw new IOException("Duplicate uniqueId [" + extension.getUniqueId() + "]. 
Did not load extension: " + extension); } } @@ -407,27 +370,35 @@ public String executor() { return ThreadPool.Names.GENERIC; } }; - try { - logger.info("Sending extension request type: " + REQUEST_EXTENSION_ACTION_NAME); - transportService.connectToExtensionNode(extension); - transportService.sendRequest( - extension, - REQUEST_EXTENSION_ACTION_NAME, - new InitializeExtensionRequest(transportService.getLocalNode(), extension), - initializeExtensionResponseHandler - ); - inProgressFuture.orTimeout(EXTENSION_REQUEST_WAIT_TIMEOUT, TimeUnit.SECONDS).join(); - } catch (CompletionException | ConnectTransportException e) { - if (e.getCause() instanceof TimeoutException || e instanceof ConnectTransportException) { - logger.info("No response from extension to request.", e); - } else if (e.getCause() instanceof RuntimeException) { - throw (RuntimeException) e.getCause(); - } else if (e.getCause() instanceof Error) { - throw (Error) e.getCause(); - } else { - throw new RuntimeException(e.getCause()); + + logger.info("Sending extension request type: " + REQUEST_EXTENSION_ACTION_NAME); + transportService.getThreadPool().generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + extensionIdMap.remove(extension.getId()); + if (e.getCause() instanceof ConnectTransportException) { + logger.info("No response from extension to request.", e); + throw (ConnectTransportException) e.getCause(); + } else if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else if (e.getCause() instanceof Error) { + throw (Error) e.getCause(); + } else { + throw new RuntimeException(e.getCause()); + } } - } + + @Override + protected void doRun() throws Exception { + transportService.connectToExtensionNode(extension); + transportService.sendRequest( + extension, + REQUEST_EXTENSION_ACTION_NAME, + new InitializeExtensionRequest(transportService.getLocalNode(), extension), + initializeExtensionResponseHandler + ); + } + }); } /** @@ -466,84 +437,6 @@ TransportResponse handleExtensionRequest(ExtensionRequest extensionRequest) thro } } - private ExtensionsSettings readFromExtensionsYml(Path filePath) throws IOException { - Yaml yaml = new Yaml(); - try (InputStream inputStream = Files.newInputStream(filePath)) { - Map obj = yaml.load(inputStream); - if (obj == null) { - inputStream.close(); - throw new IOException("extensions.yml is empty"); - } - List> unreadExtensions = new ArrayList<>((Collection>) obj.get("extensions")); - List readExtensions = new ArrayList(); - Set additionalSettingsKeys = additionalSettings.stream().map(s -> s.getKey()).collect(Collectors.toSet()); - for (HashMap extensionMap : unreadExtensions) { - try { - // checking to see whether any required fields are missing from extension.yml file or not - String[] requiredFields = { - "name", - "uniqueId", - "hostAddress", - "port", - "version", - "opensearchVersion", - "minimumCompatibleVersion" }; - List missingFields = Arrays.stream(requiredFields) - .filter(field -> !extensionMap.containsKey(field)) - .collect(Collectors.toList()); - if (!missingFields.isEmpty()) { - throw new IOException("Extension is missing these required fields : " + missingFields); - } - - // Parse extension dependencies - List extensionDependencyList = new ArrayList(); - if (extensionMap.get("dependencies") != null) { - List> extensionDependencies = new ArrayList<>( - (Collection>) extensionMap.get("dependencies") - ); - for (HashMap dependency : extensionDependencies) { - extensionDependencyList.add( - new 
ExtensionDependency( - dependency.get("uniqueId").toString(), - Version.fromString(dependency.get("version").toString()) - ) - ); - } - } - - ExtensionScopedSettings extAdditionalSettings = new ExtensionScopedSettings(additionalSettings); - Map additionalSettingsMap = extensionMap.entrySet() - .stream() - .filter(kv -> additionalSettingsKeys.contains(kv.getKey())) - .collect(Collectors.toMap(map -> map.getKey(), map -> map.getValue())); - - Settings.Builder output = Settings.builder(); - output.loadFromMap(additionalSettingsMap); - extAdditionalSettings.applySettings(output.build()); - - // Create extension read from yml config - readExtensions.add( - new Extension( - extensionMap.get("name").toString(), - extensionMap.get("uniqueId").toString(), - extensionMap.get("hostAddress").toString(), - extensionMap.get("port").toString(), - extensionMap.get("version").toString(), - extensionMap.get("opensearchVersion").toString(), - extensionMap.get("minimumCompatibleVersion").toString(), - extensionDependencyList, - extAdditionalSettings - ) - ); - } catch (IOException e) { - logger.warn("loading extension has been failed because of exception : " + e.getMessage()); - } - } - inputStream.close(); - return new ExtensionsSettings(readExtensions); - } - } - static String getRequestExtensionActionName() { return REQUEST_EXTENSION_ACTION_NAME; } @@ -560,10 +453,6 @@ static Logger getLogger() { return logger; } - Path getExtensionsPath() { - return extensionsPath; - } - TransportService getTransportService() { return transportService; } @@ -615,4 +504,8 @@ void setAddSettingsUpdateConsumerRequestHandler(AddSettingsUpdateConsumerRequest Settings getEnvironmentSettings() { return environmentSettings; } + + public Set> getAdditionalSettings() { + return this.additionalSettings; + } } diff --git a/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java index fb7160bc1bc67..d434074279041 100644 --- a/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java @@ -9,7 +9,6 @@ package org.opensearch.extensions; import java.io.IOException; -import java.nio.file.Path; import java.util.Optional; import java.util.Set; @@ -32,7 +31,7 @@ public class NoopExtensionsManager extends ExtensionsManager { public NoopExtensionsManager() throws IOException { - super(Path.of(""), Set.of()); + super(Set.of()); } @Override @@ -59,11 +58,6 @@ public ExtensionActionResponse handleTransportRequest(ExtensionActionRequest req return new ExtensionActionResponse(new byte[0]); } - @Override - protected void discover() throws IOException { - // no-op - } - @Override public void initialize() { // no-op diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionActionUtil.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionActionUtil.java new file mode 100644 index 0000000000000..8b898b3afb1e2 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionActionUtil.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions.action; + +import org.opensearch.action.ActionRequest; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +/** + * ExtensionActionUtil - a class for creating and processing remote requests using byte arrays. + */ +public class ExtensionActionUtil { + + /** + * The Unicode UNIT SEPARATOR used to separate the Request class name and parameter bytes + */ + public static final byte UNIT_SEPARATOR = (byte) '\u001F'; + + /** + * @param request an instance of a request extending {@link ActionRequest}, containing information about the + * request being sent to the remote server. It is used to create a byte array containing the request data, + * which will be sent to the remote server. + * @return a byte array containing the UTF-8 encoded request class name, the unit separator byte, and the + * serialized request parameters, in that order. + * @throws RuntimeException If a RuntimeException occurs while creating the proxy request bytes. + */ + public static byte[] createProxyRequestBytes(ActionRequest request) throws RuntimeException { + byte[] requestClassBytes = request.getClass().getName().getBytes(StandardCharsets.UTF_8); + byte[] requestBytes; + + try { + requestBytes = convertParamsToBytes(request); + assert requestBytes != null; + return ByteBuffer.allocate(requestClassBytes.length + 1 + requestBytes.length) + .put(requestClassBytes) + .put(UNIT_SEPARATOR) + .put(requestBytes) + .array(); + } catch (RuntimeException e) { + throw new RuntimeException("RuntimeException occurred while creating proxyRequestBytes", e); + } + }
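A round-trip sketch of the byte layout these helpers define; MyActionRequest is a hypothetical ActionRequest subclass with a StreamInput constructor:

    // Layout: [UTF-8 request class name] [0x1F unit separator] [Writeable-serialized fields]
    byte[] proxyBytes = ExtensionActionUtil.createProxyRequestBytes(new MyActionRequest("demo"));
    int sep = ExtensionActionUtil.delimPos(proxyBytes);
    String className = new String(proxyBytes, 0, sep, StandardCharsets.UTF_8); // e.g. "org.example.MyActionRequest"
    ActionRequest roundTripped = ExtensionActionUtil.createActionRequest(proxyBytes); // rebuilt reflectively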
+ + /** + * @param requestBytes is a byte array containing the request data, used by the "createActionRequest" + * method to create an "ActionRequest" object, which represents the request model to be processed on the server. + * @return an {@link ActionRequest} instance reconstructed from the given bytes + * @throws ReflectiveOperationException if an exception occurs during the reflective operation, such as when + * resolving the request class, accessing the constructor, or creating an instance using reflection + * @throws NullPointerException if a null pointer exception occurs during the creation of the ActionRequest object + */ + public static ActionRequest createActionRequest(byte[] requestBytes) throws ReflectiveOperationException { + int delimPos = delimPos(requestBytes); + String requestClassName = new String(Arrays.copyOfRange(requestBytes, 0, delimPos + 1), StandardCharsets.UTF_8).stripTrailing(); + try { + Class<?> clazz = Class.forName(requestClassName); + Constructor<?> constructor = clazz.getConstructor(StreamInput.class); + StreamInput requestByteStream = StreamInput.wrap(Arrays.copyOfRange(requestBytes, delimPos + 1, requestBytes.length)); + return (ActionRequest) constructor.newInstance(requestByteStream); + } catch (ReflectiveOperationException e) { + throw new ReflectiveOperationException( + "ReflectiveOperationException occurred while creating extensionAction request from bytes", + e + ); + } catch (NullPointerException e) { + throw new NullPointerException( + "NullPointerException occurred while creating extensionAction request from bytes" + e.getMessage() + ); + } + } + + /** + * Converts the given object of type T, which implements the {@link Writeable} interface, to a byte array. + * @param <T> the type of the object to be converted to bytes, which must implement the {@link Writeable} interface. + * @param writeableObject the object of type T to be converted to bytes. + * @return a byte array containing the serialized bytes of the given object + * @throws IllegalStateException if a failure occurs while writing the data + */ + public static <T extends Writeable> byte[] convertParamsToBytes(T writeableObject) throws IllegalStateException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + writeableObject.writeTo(out); + return BytesReference.toBytes(out.bytes()); + } catch (IOException ioe) { + throw new IllegalStateException("Failure writing bytes", ioe); + } + } + + /** + * Searches for the position of the unit separator byte in the given byte array. + * + * @param bytes the byte array to search for the unit separator byte. + * @return the index of the unit separator byte in the byte array, or -1 if it was not found.
+ */ + public static int delimPos(byte[] bytes) { + for (int offset = 0; offset < bytes.length; ++offset) { + if (bytes[offset] == ExtensionActionUtil.UNIT_SEPARATOR) { + return offset; + } + } + return -1; + } +} diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java index 37638f2a333d5..d890c1b85bb81 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java @@ -13,7 +13,6 @@ import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.rest.RestController; import org.opensearch.rest.RestHandler; -import org.opensearch.rest.extensions.RestSendToExtensionAction; import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java new file mode 100644 index 0000000000000..f47f342617732 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java @@ -0,0 +1,192 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.rest; + +import org.opensearch.Version; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.extensions.ExtensionDependency; +import org.opensearch.extensions.ExtensionScopedSettings; +import org.opensearch.extensions.ExtensionsManager; +import org.opensearch.extensions.ExtensionsSettings.Extension; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestStatus; +import org.opensearch.transport.ConnectTransportException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CompletionException; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; + +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * An action that initializes an extension + */ +public class RestInitializeExtensionAction extends BaseRestHandler { + + private final ExtensionsManager extensionsManager; + + @Override + public String getName() { + return ExtensionsManager.REQUEST_EXTENSION_ACTION_NAME; + } + + @Override + public List<Route> routes() { + return List.of(new Route(POST, "/_extensions/initialize")); + } + + public RestInitializeExtensionAction(ExtensionsManager extensionsManager) { + this.extensionsManager = extensionsManager; + }
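A hypothetical request body for the route above; every value is illustrative, but the keys match the required fields validated in prepareRequest below:

    // POST /_extensions/initialize
    // {
    //   "name": "hello-world",
    //   "uniqueId": "hello-world-extension",
    //   "hostAddress": "127.0.0.1",
    //   "port": "4532",
    //   "version": "1.0",
    //   "opensearchVersion": "3.0.0",
    //   "minimumCompatibleVersion": "3.0.0"
    // }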
+ + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String name = null; + String uniqueId = null; + String hostAddress = null; + String port = null; + String version = null; + String openSearchVersion = null; + String minimumCompatibleVersion = null; + List<ExtensionDependency> dependencies = new ArrayList<>(); + Set<String> additionalSettingsKeys = extensionsManager.getAdditionalSettings() + .stream() + .map(s -> s.getKey()) + .collect(Collectors.toSet()); + + Tuple<? extends MediaType, Map<String, Object>> unreadExtensionTuple = XContentHelper.convertToMap( + request.content(), + false, + request.getXContentType().xContent().mediaType() + ); + Map<String, Object> extensionMap = unreadExtensionTuple.v2(); + + ExtensionScopedSettings extAdditionalSettings = new ExtensionScopedSettings(extensionsManager.getAdditionalSettings()); + + try { + // check whether any required fields are missing from the extension initialization request + String[] requiredFields = { + "name", + "uniqueId", + "hostAddress", + "port", + "version", + "opensearchVersion", + "minimumCompatibleVersion" }; + List<String> missingFields = Arrays.stream(requiredFields) + .filter(field -> !extensionMap.containsKey(field)) + .collect(Collectors.toList()); + if (!missingFields.isEmpty()) { + throw new IOException("Extension is missing these required fields : " + missingFields); + } + + // Parse extension dependencies + List<ExtensionDependency> extensionDependencyList = new ArrayList<ExtensionDependency>(); + if (extensionMap.get("dependencies") != null) { + List<HashMap<String, ?>> extensionDependencies = new ArrayList<>( + (Collection<HashMap<String, ?>>) extensionMap.get("dependencies") + ); + for (HashMap<String, ?> dependency : extensionDependencies) { + if (Strings.isNullOrEmpty((String) dependency.get("uniqueId"))) { + throw new IOException("Required field [uniqueId] is missing in the request for the dependent extension"); + } else if (dependency.get("version") == null) { + throw new IOException("Required field [version] is missing in the request for the dependent extension"); + } + extensionDependencyList.add( + new ExtensionDependency( + dependency.get("uniqueId").toString(), + Version.fromString(dependency.get("version").toString()) + ) + ); + } + } + + Map<String, ?> additionalSettingsMap = extensionMap.entrySet() + .stream() + .filter(kv -> additionalSettingsKeys.contains(kv.getKey())) + .collect(Collectors.toMap(map -> map.getKey(), map -> map.getValue())); + + Settings.Builder output = Settings.builder(); + output.loadFromMap(additionalSettingsMap); + extAdditionalSettings.applySettings(output.build()); + + // Create extension read from initialization request + name = extensionMap.get("name").toString(); + uniqueId = extensionMap.get("uniqueId").toString(); + hostAddress = extensionMap.get("hostAddress").toString(); + port = extensionMap.get("port").toString(); + version = extensionMap.get("version").toString(); + openSearchVersion = extensionMap.get("opensearchVersion").toString(); + minimumCompatibleVersion = extensionMap.get("minimumCompatibleVersion").toString(); + dependencies = extensionDependencyList; + } catch (IOException e) { + logger.warn("loading extension has failed because of exception: " + e.getMessage()); + return channel -> channel.sendResponse(new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage())); + } + + Extension extension = new Extension( + name, + uniqueId, + hostAddress, + port, + version, + openSearchVersion, + minimumCompatibleVersion, + dependencies, + extAdditionalSettings + ); + try { + extensionsManager.loadExtension(extension); + extensionsManager.initialize(); + } catch (CompletionException e) { + Throwable cause = e.getCause(); + if (cause instanceof TimeoutException) { + return channel -> channel.sendResponse( + new BytesRestResponse(RestStatus.REQUEST_TIMEOUT, "No response from
extension to request.") + ); + } else if (cause instanceof ConnectTransportException || cause instanceof RuntimeException) { + return channel -> channel.sendResponse( + new BytesRestResponse(RestStatus.REQUEST_TIMEOUT, "Connection failed with the extension.") + ); + } + if (e.getCause() instanceof Error) { + throw (Error) e.getCause(); + } + } catch (Exception e) { + return channel -> channel.sendResponse(new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage())); + + } + + return channel -> { + try (XContentBuilder builder = channel.newBuilder()) { + builder.startObject(); + builder.field("success", "A request to initialize an extension has been sent."); + builder.endObject(); + channel.sendResponse(new BytesRestResponse(RestStatus.ACCEPTED, builder)); + } + }; + + } +} diff --git a/server/src/main/java/org/opensearch/rest/extensions/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java similarity index 98% rename from server/src/main/java/org/opensearch/rest/extensions/RestSendToExtensionAction.java rename to server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index 51ff74b1869a0..073b3f3f45818 100644 --- a/server/src/main/java/org/opensearch/rest/extensions/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.rest.extensions; +package org.opensearch.extensions.rest; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -17,9 +17,6 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.ExtensionsManager; -import org.opensearch.extensions.rest.ExtensionRestRequest; -import org.opensearch.extensions.rest.RegisterRestActionsRequest; -import org.opensearch.extensions.rest.RestExecuteOnExtensionResponse; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.NamedRoute; diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 73797106bb66f..e6930b41088e2 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -603,7 +603,10 @@ public synchronized void removeShard(int shardId, String reason) { private void closeShard(String reason, ShardId sId, IndexShard indexShard, Store store, IndexEventListener listener) { final int shardId = sId.id(); final Settings indexSettings = this.getIndexSettings().getSettings(); - Store remoteStore = indexShard.remoteStore(); + Store remoteStore = null; + if (indexShard != null) { + remoteStore = indexShard.remoteStore(); + } if (store != null) { store.beforeClose(); } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 9c6613495ba80..dae2d2369fc4c 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -595,6 +595,16 @@ public final class IndexSettings { Property.Dynamic ); + public static final TimeValue DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL = new TimeValue(650, TimeUnit.MILLISECONDS); + public static final TimeValue MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL = TimeValue.ZERO; + public static 
final Setting INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( + "index.remote_store.translog.buffer_interval", + DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL, + Property.Dynamic, + Property.IndexScope + ); + private final Index index; private final Version version; private final Logger logger; @@ -604,7 +614,7 @@ public final class IndexSettings { private final ReplicationType replicationType; private final boolean isRemoteStoreEnabled; private final boolean isRemoteTranslogStoreEnabled; - private final TimeValue remoteTranslogUploadBufferInterval; + private volatile TimeValue remoteTranslogUploadBufferInterval; private final String remoteStoreTranslogRepository; private final String remoteStoreRepository; private final boolean isRemoteSnapshot; @@ -775,10 +785,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); isRemoteTranslogStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false); remoteStoreTranslogRepository = settings.get(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY); - remoteTranslogUploadBufferInterval = settings.getAsTime( - IndexMetadata.SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL, - TimeValue.timeValueMillis(100) - ); + remoteTranslogUploadBufferInterval = INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY); isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); @@ -911,6 +918,10 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(INDEX_MERGE_ON_FLUSH_ENABLED, this::setMergeOnFlushEnabled); scopedSettings.addSettingsUpdateConsumer(INDEX_MERGE_ON_FLUSH_POLICY, this::setMergeOnFlushPolicy); scopedSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_PIPELINE, this::setDefaultSearchPipeline); + scopedSettings.addSettingsUpdateConsumer( + INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + this::setRemoteTranslogUploadBufferInterval + ); } private void setSearchSegmentOrderReversed(boolean reversed) { @@ -1191,6 +1202,10 @@ public TimeValue getRemoteTranslogUploadBufferInterval() { return remoteTranslogUploadBufferInterval; } + public void setRemoteTranslogUploadBufferInterval(TimeValue remoteTranslogUploadBufferInterval) { + this.remoteTranslogUploadBufferInterval = remoteTranslogUploadBufferInterval; + } + /** * Returns this interval in which the shards of this index are asynchronously refreshed. {@code -1} means async refresh is disabled. 
*/ diff --git a/server/src/main/java/org/opensearch/index/codec/CodecService.java b/server/src/main/java/org/opensearch/index/codec/CodecService.java index e4899c02d37e8..b6dac7bd1596c 100644 --- a/server/src/main/java/org/opensearch/index/codec/CodecService.java +++ b/server/src/main/java/org/opensearch/index/codec/CodecService.java @@ -38,6 +38,8 @@ import org.apache.lucene.codecs.lucene95.Lucene95Codec.Mode; import org.opensearch.common.Nullable; import org.opensearch.common.collect.MapBuilder; +import org.opensearch.index.codec.customcodecs.ZstdCodec; +import org.opensearch.index.codec.customcodecs.ZstdNoDictCodec; import org.opensearch.index.mapper.MapperService; import java.util.Map; @@ -58,15 +60,21 @@ public class CodecService { public static final String BEST_COMPRESSION_CODEC = "best_compression"; /** the raw unfiltered lucene default. useful for testing */ public static final String LUCENE_DEFAULT_CODEC = "lucene_default"; + public static final String ZSTD_CODEC = "zstd"; + public static final String ZSTD_NO_DICT_CODEC = "zstd_no_dict"; public CodecService(@Nullable MapperService mapperService, Logger logger) { final MapBuilder codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { codecs.put(DEFAULT_CODEC, new Lucene95Codec()); codecs.put(BEST_COMPRESSION_CODEC, new Lucene95Codec(Mode.BEST_COMPRESSION)); + codecs.put(ZSTD_CODEC, new ZstdCodec()); + codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec()); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); + codecs.put(ZSTD_CODEC, new ZstdCodec(mapperService, logger)); + codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(mapperService, logger)); } codecs.put(LUCENE_DEFAULT_CODEC, Codec.getDefault()); for (String codec : Codec.availableCodecs()) { diff --git a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java similarity index 65% rename from sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java rename to server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java index ad9e5cd3374fa..3c570f9d0566c 100644 --- a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java @@ -8,19 +8,27 @@ package org.opensearch.index.codec.customcodecs; -import org.apache.lucene.codecs.StoredFieldsFormat; +import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.FilterCodec; +import org.apache.lucene.codecs.StoredFieldsFormat; import org.apache.lucene.codecs.lucene95.Lucene95Codec; +import org.opensearch.index.codec.PerFieldMappingPostingFormatCodec; +import org.opensearch.index.mapper.MapperService; -import java.util.Locale; - -abstract class Lucene95CustomCodec extends FilterCodec { +/** + * + * Extends {@link FilterCodec} to reuse the functionality of Lucene Codec. + * Supports two modes zstd and zstd_no_dict. + * + * @opensearch.internal + */ +public abstract class Lucene95CustomCodec extends FilterCodec { public static final int DEFAULT_COMPRESSION_LEVEL = 6; /** Each mode represents a compression algorithm. 
*/ public enum Mode { ZSTD, - ZSTDNODICT + ZSTD_NO_DICT } private final StoredFieldsFormat storedFieldsFormat; @@ -40,10 +48,15 @@ public Lucene95CustomCodec(Mode mode) { * the other compression codecs: default, lucene_default, and best_compression. * * @param mode The compression codec (ZSTD or ZSTDNODICT). - * @parama compressionLevel The compression level. + * @param compressionLevel The compression level. */ public Lucene95CustomCodec(Mode mode, int compressionLevel) { - super(mode.name().toLowerCase(Locale.ROOT), new Lucene95Codec()); + super("Lucene95CustomCodec", new Lucene95Codec()); + this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); + } + + public Lucene95CustomCodec(Mode mode, int compressionLevel, MapperService mapperService, Logger logger) { + super("Lucene95CustomCodec", new PerFieldMappingPostingFormatCodec(Lucene95Codec.Mode.BEST_SPEED, mapperService, logger)); this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); } diff --git a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java similarity index 96% rename from sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java rename to server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java index 2bfec2ef171d4..f70306afc8562 100644 --- a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java +++ b/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java @@ -8,8 +8,6 @@ package org.opensearch.index.codec.customcodecs; -import java.io.IOException; -import java.util.Objects; import org.apache.lucene.codecs.StoredFieldsFormat; import org.apache.lucene.codecs.StoredFieldsReader; import org.apache.lucene.codecs.StoredFieldsWriter; @@ -20,6 +18,9 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; +import java.io.IOException; +import java.util.Objects; + /** Stored field format used by pluggable codec */ public class Lucene95CustomStoredFieldsFormat extends StoredFieldsFormat { @@ -95,7 +96,7 @@ public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOCo return impl(mode).fieldsWriter(directory, si, context); } - private StoredFieldsFormat impl(Lucene95CustomCodec.Mode mode) { + StoredFieldsFormat impl(Lucene95CustomCodec.Mode mode) { switch (mode) { case ZSTD: return new Lucene90CompressingStoredFieldsFormat( @@ -105,7 +106,7 @@ private StoredFieldsFormat impl(Lucene95CustomCodec.Mode mode) { ZSTD_MAX_DOCS_PER_BLOCK, ZSTD_BLOCK_SHIFT ); - case ZSTDNODICT: + case ZSTD_NO_DICT: return new Lucene90CompressingStoredFieldsFormat( "CustomStoredFieldsZstdNoDict", zstdNoDictCompressionMode, @@ -117,4 +118,8 @@ private StoredFieldsFormat impl(Lucene95CustomCodec.Mode mode) { throw new AssertionError(); } } + + Lucene95CustomCodec.Mode getMode() { + return mode; + } } diff --git a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java similarity index 79% rename from sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java rename to 
server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java index 2b09540d8037d..68da782421e6e 100644 --- a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java @@ -8,6 +8,9 @@ package org.opensearch.index.codec.customcodecs; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.mapper.MapperService; + /** * ZstdCodec provides ZSTD compressor using the zstd-jni library. */ @@ -29,6 +32,10 @@ public ZstdCodec(int compressionLevel) { super(Mode.ZSTD, compressionLevel); } + public ZstdCodec(MapperService mapperService, Logger logger) { + super(Mode.ZSTD, DEFAULT_COMPRESSION_LEVEL, mapperService, logger); + } + /** The name for this codec. */ @Override public String toString() { diff --git a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java similarity index 99% rename from sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java rename to server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java index 5b8f1ffcc9569..7057dac3d6bd2 100644 --- a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java +++ b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java @@ -13,16 +13,17 @@ import com.github.luben.zstd.ZstdDecompressCtx; import com.github.luben.zstd.ZstdDictCompress; import com.github.luben.zstd.ZstdDictDecompress; -import java.io.IOException; import org.apache.lucene.codecs.compressing.CompressionMode; import org.apache.lucene.codecs.compressing.Compressor; import org.apache.lucene.codecs.compressing.Decompressor; +import org.apache.lucene.store.ByteBuffersDataInput; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; -import org.apache.lucene.store.ByteBuffersDataInput; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; +import java.io.IOException; + /** Zstandard Compression Mode */ public class ZstdCompressionMode extends CompressionMode { diff --git a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java similarity index 74% rename from sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java rename to server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java index 4ed6ba57545d0..26620473ec116 100644 --- a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java @@ -8,6 +8,9 @@ package org.opensearch.index.codec.customcodecs; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.mapper.MapperService; + /** * ZstdNoDictCodec provides ZSTD compressor without a dictionary support. */ @@ -26,7 +29,11 @@ public ZstdNoDictCodec() { * @param compressionLevel The compression level. 
*/ public ZstdNoDictCodec(int compressionLevel) { - super(Mode.ZSTDNODICT, compressionLevel); + super(Mode.ZSTD_NO_DICT, compressionLevel); + } + + public ZstdNoDictCodec(MapperService mapperService, Logger logger) { + super(Mode.ZSTD_NO_DICT, DEFAULT_COMPRESSION_LEVEL, mapperService, logger); } /** The name for this codec. */ diff --git a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java similarity index 99% rename from sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java rename to server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java index 6cfd85b053190..7a1d661550768 100644 --- a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java +++ b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java @@ -9,16 +9,17 @@ package org.opensearch.index.codec.customcodecs; import com.github.luben.zstd.Zstd; -import java.io.IOException; import org.apache.lucene.codecs.compressing.CompressionMode; import org.apache.lucene.codecs.compressing.Compressor; import org.apache.lucene.codecs.compressing.Decompressor; +import org.apache.lucene.store.ByteBuffersDataInput; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; -import org.apache.lucene.store.ByteBuffersDataInput; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; +import java.io.IOException; + /** ZSTD Compression Mode (without a dictionary support). */ public class ZstdNoDictCompressionMode extends CompressionMode { diff --git a/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java similarity index 100% rename from sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java rename to server/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index 338a541af387a..7419cf1dadea6 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -129,12 +129,14 @@ public Supplier retentionLeasesSupplier() { switch (s) { case "default": case "best_compression": + case "zstd": + case "zstd_no_dict": case "lucene_default": return s; default: if (Codec.availableCodecs().contains(s) == false) { // we don't error message the not officially supported ones throw new IllegalArgumentException( - "unknown value for [index.codec] must be one of [default, best_compression] but was: " + s + "unknown value for [index.codec] must be one of [default, best_compression, zstd, zstd_no_dict] but was: " + s ); } return s; diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java index 529405c90c9a4..36e6a242ecdec 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java @@ -120,11 +120,13 @@ abstract class XFieldComparatorSource extends 
FieldComparatorSource { protected final MultiValueMode sortMode; protected final Object missingValue; protected final Nested nested; + protected boolean enableSkipping; public XFieldComparatorSource(Object missingValue, MultiValueMode sortMode, Nested nested) { this.sortMode = sortMode; this.missingValue = missingValue; this.nested = nested; + this.enableSkipping = true; // true by default } public MultiValueMode sortMode() { @@ -135,6 +137,10 @@ public Nested nested() { return this.nested; } + public void disableSkipping() { + this.enableSkipping = false; + } + /** * Simple wrapper class around a filter that matches parent documents * and a filter that matches child documents. For every root document R, diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java index 052a679de9765..ae8ffd8fe6b97 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java @@ -198,24 +198,35 @@ private XFieldComparatorSource comparatorSource( MultiValueMode sortMode, Nested nested ) { + final XFieldComparatorSource source; switch (targetNumericType) { case HALF_FLOAT: case FLOAT: - return new FloatValuesComparatorSource(this, missingValue, sortMode, nested); + source = new FloatValuesComparatorSource(this, missingValue, sortMode, nested); + break; case DOUBLE: - return new DoubleValuesComparatorSource(this, missingValue, sortMode, nested); + source = new DoubleValuesComparatorSource(this, missingValue, sortMode, nested); + break; case UNSIGNED_LONG: - return new UnsignedLongValuesComparatorSource(this, missingValue, sortMode, nested); + source = new UnsignedLongValuesComparatorSource(this, missingValue, sortMode, nested); + break; case DATE: - return dateComparatorSource(missingValue, sortMode, nested); + source = dateComparatorSource(missingValue, sortMode, nested); + break; case DATE_NANOSECONDS: - return dateNanosComparatorSource(missingValue, sortMode, nested); + source = dateNanosComparatorSource(missingValue, sortMode, nested); + break; case LONG: - return new LongValuesComparatorSource(this, missingValue, sortMode, nested); + source = new LongValuesComparatorSource(this, missingValue, sortMode, nested); + break; default: assert !targetNumericType.isFloatingPoint(); - return new IntValuesComparatorSource(this, missingValue, sortMode, nested); + source = new IntValuesComparatorSource(this, missingValue, sortMode, nested); } + if (targetNumericType != getNumericType()) { + source.disableSkipping(); // disable skipping logic for cast of sort field + } + return source; } protected XFieldComparatorSource dateComparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java index f8af86c904f2c..34e86070054c9 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java @@ -104,7 +104,7 @@ public FieldComparator newComparator(String fieldname, int numHits, boolean e final double dMissingValue = (Double) missingObject(missingValue, reversed); // NOTE: it's important to pass null as a
missing value in the constructor so that // the comparator doesn't check docsWithField since we replace missing values in select() - return new DoubleComparator(numHits, null, null, reversed, false) { + return new DoubleComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new DoubleLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java index 686bef479c179..04a34cd418520 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java @@ -97,7 +97,7 @@ public FieldComparator newComparator(String fieldname, int numHits, boolean e final float fMissingValue = (Float) missingObject(missingValue, reversed); // NOTE: it's important to pass null as a missing value in the constructor so that // the comparator doesn't check docsWithField since we replace missing values in select() - return new FloatComparator(numHits, null, null, reversed, false) { + return new FloatComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new FloatLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java index 90afa5fc64c29..d5ea1eaf7263d 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java @@ -76,7 +76,7 @@ public FieldComparator newComparator(String fieldname, int numHits, boolean e final int iMissingValue = (Integer) missingObject(missingValue, reversed); // NOTE: it's important to pass null as a missing value in the constructor so that // the comparator doesn't check docsWithField since we replace missing values in select() - return new IntComparator(numHits, null, null, reversed, false) { + return new IntComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new IntLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java index 462092dca5110..43e033dd59716 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java @@ -120,7 +120,7 @@ public FieldComparator newComparator(String fieldname, int numHits, boolean e final long lMissingValue = (Long) missingObject(missingValue, reversed); // NOTE: it's important to pass null as a missing value in the constructor so that // the comparator doesn't check docsWithField since we replace missing values in select() - return new 
LongComparator(numHits, null, null, reversed, false) { + return new LongComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new LongLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java index d8b2e9528d2cf..be56b50179114 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java @@ -92,7 +92,7 @@ public FieldComparator newComparator(String fieldname, int numHits, boolean e assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final BigInteger ulMissingValue = (BigInteger) missingObject(missingValue, reversed); - return new UnsignedLongComparator(numHits, null, null, reversed, false) { + return new UnsignedLongComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new UnsignedLongLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java new file mode 100644 index 0000000000000..1eeadfe228a45 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import java.util.Arrays; + +/** + * Utils for remote store + * + * @opensearch.internal + */ +public class RemoteStoreUtils { + public static final int LONG_MAX_LENGTH = String.valueOf(Long.MAX_VALUE).length();
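A quick worked example of the inversion implemented below (Long.MAX_VALUE is 9223372036854775807, 19 digits; fixed-length zero padding means lexicographic order of the inverted strings is the reverse of numeric order):

    // invertLong(0L)                      -> "9223372036854775807" (already 19 chars, no padding)
    // invertLong(Long.MAX_VALUE - 42L)    -> "0000000000000000042" (left-padded with 0s to 19 chars)
    // invertLong("9223372036854775765")   -> 42L (the String overload undoes the long overload)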
+ * @param num number to get the inverted long string for + * @return String value of Long.MAX_VALUE - num + */ + public static String invertLong(long num) { + if (num < 0) { + throw new IllegalArgumentException("Negative long values are not allowed"); + } + String invertedLong = String.valueOf(Long.MAX_VALUE - num); + char[] characterArray = new char[LONG_MAX_LENGTH - invertedLong.length()]; + Arrays.fill(characterArray, '0'); + + return new String(characterArray) + invertedLong; + } + + /** + * This method converts the given string into long and subtracts it from Long.MAX_VALUE + * @param str long in string format to be inverted + * @return long value of the invert result + */ + public static long invertLong(String str) { + long num = Long.parseLong(str); + if (num < 0) { + throw new IllegalArgumentException("Strings representing negative long values are not allowed"); + } + return Long.MAX_VALUE - num; + } +} diff --git a/server/src/main/java/org/opensearch/index/search/MatchQuery.java b/server/src/main/java/org/opensearch/index/search/MatchQuery.java index 2c8b091fef8ef..91a4b456bfa2a 100644 --- a/server/src/main/java/org/opensearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/opensearch/index/search/MatchQuery.java @@ -56,7 +56,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.QueryBuilder; -import org.apache.lucene.util.TermAndBoost; import org.apache.lucene.util.graph.GraphTokenStreamFiniteStrings; import org.opensearch.OpenSearchException; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java index 554d9714409b7..241f05af2c512 100644 --- a/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java @@ -42,7 +42,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.TermAndBoost; import org.opensearch.common.lucene.search.Queries; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.AbstractQueryBuilder; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index e368ee82b1084..01c0a12d463ea 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -44,6 +44,7 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; +import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; @@ -382,7 +383,7 @@ public IndexShard( threadPool, this::getEngine, indexSettings.isRemoteTranslogStoreEnabled(), - indexSettings.getRemoteTranslogUploadBufferInterval() + indexSettings::getRemoteTranslogUploadBufferInterval ); this.mapperService = mapperService; this.indexCache = indexCache; @@ -1495,7 +1496,7 @@ public GatedCloseable acquireLastIndexCommitAndRefresh(boolean flus * @throws IOException if there is some failure in acquiring lock in remote store. 
*/ public void acquireLockOnCommitData(String snapshotId, long primaryTerm, long generation) throws IOException { - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = getRemoteSegmentDirectoryForShard(); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = getRemoteDirectory(); remoteSegmentStoreDirectory.acquireLock(primaryTerm, generation, snapshotId); } @@ -1507,20 +1508,10 @@ public void acquireLockOnCommitData(String snapshotId, long primaryTerm, long ge * @throws IOException if there is some failure in releasing lock in remote store. */ public void releaseLockOnCommitData(String snapshotId, long primaryTerm, long generation) throws IOException { - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = getRemoteSegmentDirectoryForShard(); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = getRemoteDirectory(); remoteSegmentStoreDirectory.releaseLock(primaryTerm, generation, snapshotId); } - private RemoteSegmentStoreDirectory getRemoteSegmentDirectoryForShard() { - FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory(); - assert remoteStoreDirectory.getDelegate() instanceof FilterDirectory - : "Store.directory is not enclosing an instance of FilterDirectory"; - FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); - final Directory remoteDirectory = byteSizeCachingStoreDirectory.getDelegate(); - assert remoteDirectory instanceof RemoteSegmentStoreDirectory : "remoteDirectory is not an instance of RemoteSegmentStoreDirectory"; - return ((RemoteSegmentStoreDirectory) remoteDirectory); - } - public Optional<NRTReplicationEngine> getReplicationEngine() { if (getEngine() instanceof NRTReplicationEngine) { return Optional.of((NRTReplicationEngine) getEngine()); @@ -1579,13 +1570,21 @@ public Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> getLatestSegme if (indexSettings.isSegRepEnabled() == false) { return null; } + + Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> nullSegmentInfosEmptyCheckpoint = new Tuple<>( + new GatedCloseable<>(null, () -> {}), + ReplicationCheckpoint.empty(shardId, getDefaultCodecName()) + ); + if (getEngineOrNull() == null) { - return new Tuple<>(new GatedCloseable<>(null, () -> {}), ReplicationCheckpoint.empty(shardId, getDefaultCodecName())); + return nullSegmentInfosEmptyCheckpoint; } // do not close the snapshot - caller will close it.
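To make the contract in the comment above concrete, a sketch of the expected caller side (GatedCloseable is used with try-with-resources elsewhere in this patch, so it is assumed to be Closeable; IOException handling elided):

    Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> latest = indexShard.getLatestSegmentInfosAndCheckpoint();
    try (GatedCloseable<SegmentInfos> wrapped = latest.v1()) {
        SegmentInfos segmentInfos = wrapped.get();      // may be null when the empty checkpoint is returned
        ReplicationCheckpoint checkpoint = latest.v2();
        // ... publish the checkpoint / copy segments ...
    }   // closing the gate releases the underlying resources exactly once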
- final GatedCloseable<SegmentInfos> snapshot = getSegmentInfosSnapshot(); - return Optional.ofNullable(snapshot.get()).map(segmentInfos -> { - try { + GatedCloseable<SegmentInfos> snapshot = null; + try { + snapshot = getSegmentInfosSnapshot(); + if (snapshot.get() != null) { + SegmentInfos segmentInfos = snapshot.get(); return new Tuple<>( snapshot, new ReplicationCheckpoint( @@ -1601,10 +1600,18 @@ public Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> getLatestSegme getEngine().config().getCodec().getName() ) ); - } catch (IOException e) { - throw new OpenSearchException("Error Fetching SegmentInfos and latest checkpoint", e); } - }).orElseGet(() -> new Tuple<>(new GatedCloseable<>(null, () -> {}), ReplicationCheckpoint.empty(shardId, getDefaultCodecName()))); + } catch (IOException | AlreadyClosedException e) { + logger.error("Error Fetching SegmentInfos and latest checkpoint", e); + if (snapshot != null) { + try { + snapshot.close(); + } catch (IOException ex) { + throw new OpenSearchException("Error Closing SegmentInfos Snapshot", e); + } + } + } + return nullSegmentInfosEmptyCheckpoint; } /** @@ -2274,7 +2281,24 @@ public void openEngineAndSkipTranslogRecovery() throws IOException { getEngine().translogManager().skipTranslogRecovery(); } + public void openEngineAndSkipTranslogRecoveryFromSnapshot() throws IOException { + assert routingEntry().recoverySource().getType() == RecoverySource.Type.SNAPSHOT : "not a snapshot recovery [" + + routingEntry() + + "]"; + recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX); + maybeCheckIndex(); + recoveryState.setStage(RecoveryState.Stage.TRANSLOG); + recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG); + loadGlobalCheckpointToReplicationTracker(); + innerOpenEngineAndTranslog(replicationTracker, false); + getEngine().translogManager().skipTranslogRecovery(); + } + private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) throws IOException { + innerOpenEngineAndTranslog(globalCheckpointSupplier, true); + } + + private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, boolean syncFromRemote) throws IOException { assert Thread.holdsLock(mutex) == false : "opening engine under mutex"; if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); @@ -2293,11 +2317,20 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t synchronized (engineMutex) { assert currentEngineReference.get() == null : "engine is running"; verifyNotClosed(); - if (indexSettings.isRemoteStoreEnabled()) { + if (indexSettings.isRemoteStoreEnabled() && syncFromRemote) { syncSegmentsFromRemoteSegmentStore(false, true, true); } if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { - syncRemoteTranslogAndUpdateGlobalCheckpoint(); + if (syncFromRemote) { + syncRemoteTranslogAndUpdateGlobalCheckpoint(); + } else { + // We enter this block when we do not want to recover from the remote translog; + // currently this happens only during snapshot restore. + // Since initializing the remote translog cannot skip downloading translog files, + // we delete the translog files present in the remote store before that step. + deleteTranslogFilesFromRemoteTranslog(); + + } } // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata).
final Engine newEngine = engineFactory.newReadWriteEngine(config); @@ -2589,6 +2622,22 @@ public void restoreFromRemoteStore(ActionListener listener) { storeRecovery.recoverFromRemoteStore(this, listener); } + public void restoreFromSnapshotAndRemoteStore( + Repository repository, + RepositoriesService repositoriesService, + ActionListener listener + ) { + try { + assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard"; + assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " + + recoveryState.getRecoverySource(); + StoreRecovery storeRecovery = new StoreRecovery(shardId, logger); + storeRecovery.recoverFromSnapshotAndRemoteStore(this, repository, repositoriesService, listener, threadPool); + } catch (Exception e) { + listener.onFailure(e); + } + } + public void restoreFromRepository(Repository repository, ActionListener listener) { try { assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard"; @@ -3402,6 +3451,15 @@ public void startRecovery( final SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) recoveryState.getRecoverySource(); if (recoverySource.isSearchableSnapshot()) { executeRecovery("from snapshot (remote)", recoveryState, recoveryListener, this::recoverFromStore); + } else if (recoverySource.remoteStoreIndexShallowCopy()) { + final String repo = recoverySource.snapshot().getRepository(); + executeRecovery( + "from snapshot and remote store", + recoveryState, + recoveryListener, + l -> restoreFromSnapshotAndRemoteStore(repositoriesService.repository(repo), repositoriesService, l) + ); + // indicesService.indexService(shardRouting.shardId().getIndex()).addMetadataListener(); } else { final String repo = recoverySource.snapshot().getRepository(); executeRecovery( @@ -4027,7 +4085,7 @@ private static AsyncIOProcessor createTranslogSyncProcessor( ThreadPool threadPool, Supplier engineSupplier, boolean bufferAsyncIoProcessor, - TimeValue bufferInterval + Supplier bufferIntervalSupplier ) { ThreadContext threadContext = threadPool.getThreadContext(); CheckedConsumer>>, IOException> writeConsumer = candidates -> { @@ -4042,7 +4100,7 @@ private static AsyncIOProcessor createTranslogSyncProcessor( } }; if (bufferAsyncIoProcessor) { - return new BufferedAsyncIOProcessor<>(logger, 102400, threadContext, threadPool, bufferInterval) { + return new BufferedAsyncIOProcessor<>(logger, 102400, threadContext, threadPool, bufferIntervalSupplier) { @Override protected void write(List>> candidates) throws IOException { writeConsumer.accept(candidates); @@ -4520,6 +4578,13 @@ private void syncRemoteTranslogAndUpdateGlobalCheckpoint() throws IOException { loadGlobalCheckpointToReplicationTracker(); } + public void deleteTranslogFilesFromRemoteTranslog() throws IOException { + TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); + assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; + Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); + RemoteFsTranslog.cleanup(repository, shardId, getThreadPool()); + } + public void syncTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; @@ -4542,12 +4607,11 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, 
boolean re // We need to call RemoteSegmentStoreDirectory.init() in order to get latest metadata of the files that // are uploaded to the remote segment store. RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.init(); - Map uploadedSegments = ((RemoteSegmentStoreDirectory) remoteDirectory) + + Map uploadedSegments = remoteDirectory .getSegmentsUploadedToRemoteStore(); store.incRef(); remoteStore.incRef(); - List downloadedSegments = new ArrayList<>(); - List skippedSegments = new ArrayList<>(); try { final Directory storeDirectory; if (recoveryState.getStage() == RecoveryState.Stage.INDEX) { @@ -4564,18 +4628,7 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re storeDirectory = store.directory(); } Set localSegmentFiles = Sets.newHashSet(storeDirectory.listAll()); - for (String file : uploadedSegments.keySet()) { - long checksum = Long.parseLong(uploadedSegments.get(file).getChecksum()); - if (overrideLocal || localDirectoryContains(storeDirectory, file, checksum) == false) { - if (localSegmentFiles.contains(file)) { - storeDirectory.deleteFile(file); - } - storeDirectory.copyFrom(remoteDirectory, file, file, IOContext.DEFAULT); - downloadedSegments.add(file); - } else { - skippedSegments.add(file); - } - } + copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal); if (refreshLevelSegmentSync && remoteSegmentMetadata != null) { try ( @@ -4621,13 +4674,113 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re } catch (IOException e) { throw new IndexShardRecoveryException(shardId, "Exception while copying segment files from remote segment store", e); } finally { - logger.info("Downloaded segments: {}", downloadedSegments); - logger.info("Skipped download for segments: {}", skippedSegments); store.decRef(); remoteStore.decRef(); } } + /** + * Downloads segments from given remote segment store for a specific commit. 
+ * @param overrideLocal flag to override local segment files with those in remote store + * @param sourceRemoteDirectory RemoteSegmentStoreDirectory instance from which we need to sync segments + * @param primaryTerm primary term for the shard at the time of the commit operation for which we are syncing segments + * @param commitGeneration commit generation at the time of the commit operation for which we are syncing segments + * @throws IOException if an exception occurs while reading segments from the remote store + */ + public void syncSegmentsFromGivenRemoteSegmentStore( + boolean overrideLocal, + RemoteSegmentStoreDirectory sourceRemoteDirectory, + long primaryTerm, + long commitGeneration + ) throws IOException { + logger.info("Downloading segments from given remote segment store"); + RemoteSegmentStoreDirectory remoteDirectory = null; + if (remoteStore != null) { + remoteDirectory = getRemoteDirectory(); + remoteDirectory.init(); + remoteStore.incRef(); + } + Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = sourceRemoteDirectory + .initializeToSpecificCommit(primaryTerm, commitGeneration) + .getMetadata(); + final Directory storeDirectory = store.directory(); + store.incRef(); + + try { + String segmentsNFile = copySegmentFiles( + storeDirectory, + sourceRemoteDirectory, + remoteDirectory, + uploadedSegments, + overrideLocal + ); + if (segmentsNFile != null) { + try ( + ChecksumIndexInput indexInput = new BufferedChecksumIndexInput( + storeDirectory.openInput(segmentsNFile, IOContext.DEFAULT) + ) + ) { + SegmentInfos infosSnapshot = SegmentInfos.readCommit(store.directory(), indexInput, commitGeneration); + long processedLocalCheckpoint = Long.parseLong(infosSnapshot.getUserData().get(LOCAL_CHECKPOINT_KEY)); + if (remoteStore != null) { + store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); + } else { + store.directory().sync(infosSnapshot.files(true)); + store.directory().syncMetaData(); + } + } + } + } catch (IOException e) { + throw new IndexShardRecoveryException(shardId, "Exception while copying segment files from remote segment store", e); + } finally { + store.decRef(); + if (remoteStore != null) { + remoteStore.decRef(); + } + } + } + + private String copySegmentFiles( + Directory storeDirectory, + RemoteSegmentStoreDirectory sourceRemoteDirectory, + RemoteSegmentStoreDirectory targetRemoteDirectory, + Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments, + boolean overrideLocal + ) throws IOException { + List<String> downloadedSegments = new ArrayList<>(); + List<String> skippedSegments = new ArrayList<>(); + String segmentNFile = null; + try { + Set<String> localSegmentFiles = Sets.newHashSet(storeDirectory.listAll()); + if (overrideLocal) { + for (String file : localSegmentFiles) { + storeDirectory.deleteFile(file); + } + } + for (String file : uploadedSegments.keySet()) { + long checksum = Long.parseLong(uploadedSegments.get(file).getChecksum()); + if (overrideLocal || localDirectoryContains(storeDirectory, file, checksum) == false) { + storeDirectory.copyFrom(sourceRemoteDirectory, file, file, IOContext.DEFAULT); + storeDirectory.sync(Collections.singleton(file)); + downloadedSegments.add(file); + } else { + skippedSegments.add(file); + } + if (targetRemoteDirectory != null) { + targetRemoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); + } + if (file.startsWith(IndexFileNames.SEGMENTS)) { + assert segmentNFile == null : "There should be only one SegmentInfosSnapshot file"; + segmentNFile = file; + } + } + } finally { + logger.info("Downloaded segments: {}", downloadedSegments); + logger.info("Skipped download for segments: {}", skippedSegments);
+ } + return segmentNFile; + } + private boolean localDirectoryContains(Directory localDirectory, String file, long checksum) { try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { if (checksum == CodecUtil.retrieveChecksum(indexInput)) { diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index d7f7373e83bd0..ddca12d9283f3 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -29,6 +29,7 @@ import org.opensearch.index.remote.RemoteRefreshSegmentTracker; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.threadpool.Scheduler; @@ -71,6 +72,8 @@ public final class RemoteStoreRefreshListener implements ReferenceManager.Refres */ private static final int REMOTE_REFRESH_RETRY_MAX_INTERVAL_MILLIS = 10_000; + private static final int INVALID_PRIMARY_TERM = -1; + /** * Exponential back off policy with max retry interval. */ @@ -82,7 +85,7 @@ public final class RemoteStoreRefreshListener implements ReferenceManager.Refres // Visible for testing static final Set<String> EXCLUDE_FILES = Set.of("write.lock"); // Visible for testing - static final int LAST_N_METADATA_FILES_TO_KEEP = 10; + public static final int LAST_N_METADATA_FILES_TO_KEEP = 10; private final IndexShard indexShard; private final Directory storeDirectory; @@ -118,15 +121,18 @@ public RemoteStoreRefreshListener( this.storeDirectory = indexShard.store().directory(); this.remoteDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()) .getDelegate()).getDelegate(); - this.primaryTerm = indexShard.getOperationPrimaryTerm(); localSegmentChecksumMap = new HashMap<>(); + RemoteSegmentMetadata remoteSegmentMetadata = null; if (indexShard.routingEntry().primary()) { try { - this.remoteDirectory.init(); + remoteSegmentMetadata = this.remoteDirectory.init(); } catch (IOException e) { logger.error("Exception while initialising RemoteSegmentStoreDirectory", e); } } + // Initializing the primary term with the primary term of the latest metadata in the remote store. + // If no metadata is present, this value is initialized with -1. + this.primaryTerm = remoteSegmentMetadata != null ?
remoteSegmentMetadata.getPrimaryTerm() : INVALID_PRIMARY_TERM; this.segmentTracker = segmentTracker; resetBackOffDelayIterator(); this.checkpointPublisher = checkpointPublisher; @@ -163,8 +169,9 @@ public void beforeRefresh() throws IOException {} */ @Override public void afterRefresh(boolean didRefresh) { - - if (didRefresh || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty()) { + if (this.primaryTerm != indexShard.getOperationPrimaryTerm() + || didRefresh + || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty()) { updateLocalRefreshTimeAndSeqNo(); try { indexShard.getThreadPool().executor(ThreadPool.Names.REMOTE_REFRESH).submit(() -> syncSegments(false)).get(); @@ -193,9 +200,8 @@ private synchronized void syncSegments(boolean isRetry) { // if a new segments_N file is present in local that is not uploaded to remote store yet, it // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid delete post each refresh. - // Ideally, we want this to be done in async flow. (GitHub issue #4315) if (isRefreshAfterCommit()) { - deleteStaleCommits(); + remoteDirectory.deleteStaleSegmentsAsync(LAST_N_METADATA_FILES_TO_KEEP); } try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { @@ -374,14 +380,6 @@ private String getChecksumOfLocalFile(String file) throws IOException { return localSegmentChecksumMap.get(file); } - private void deleteStaleCommits() { - try { - remoteDirectory.deleteStaleSegments(LAST_N_METADATA_FILES_TO_KEEP); - } catch (IOException e) { - logger.info("Exception while deleting stale commits from remote segment store, will retry delete post next commit", e); - } - } - /** * Updates the last refresh time and refresh seq no which is seen by local store. 
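Taken together, the two RemoteStoreRefreshListener changes above mean (annotation, not part of the patch): primaryTerm is seeded from the latest remote metadata, or -1 when none exists, so the first refresh after a replica is promoted sees a term mismatch and uploads segments even if that refresh produced nothing new locally.

    // Annotated form of the new afterRefresh() trigger:
    if (this.primaryTerm != indexShard.getOperationPrimaryTerm()           // primary failover since the last upload
        || didRefresh                                                      // a refresh actually happened
        || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty()) { // nothing uploaded to remote yet
        // update local refresh time/seqno, then run syncSegments(false) on the REMOTE_REFRESH threadpool
    }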
*/ diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index a81dc96ff1145..da4e9113143af 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -59,14 +59,20 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.nio.channels.FileChannel; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -347,6 +353,76 @@ void recoverFromRepository(final IndexShard indexShard, Repository repository, A } } + void recoverFromSnapshotAndRemoteStore( + final IndexShard indexShard, + Repository repository, + RepositoriesService repositoriesService, + ActionListener listener, + ThreadPool threadPool + ) { + try { + if (canRecover(indexShard)) { + indexShard.preRecovery(); + RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType(); + assert recoveryType == RecoverySource.Type.SNAPSHOT : "expected snapshot recovery type: " + recoveryType; + SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) indexShard.recoveryState().getRecoverySource(); + final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog(); + translogState.totalOperations(0); + translogState.totalOperationsOnStart(0); + indexShard.prepareForIndexRecovery(); + + RemoteStoreShardShallowCopySnapshot shallowCopyShardMetadata = repository.getRemoteStoreShallowCopyShardMetadata( + recoverySource.snapshot().getSnapshotId(), + recoverySource.index(), + shardId + ); + + long primaryTerm = shallowCopyShardMetadata.getPrimaryTerm(); + long commitGeneration = shallowCopyShardMetadata.getCommitGeneration(); + String indexUUID = shallowCopyShardMetadata.getIndexUUID(); + String remoteStoreRepository = ((SnapshotRecoverySource) indexShard.recoveryState().getRecoverySource()) + .sourceRemoteStoreRepository(); + if (remoteStoreRepository == null) { + remoteStoreRepository = shallowCopyShardMetadata.getRemoteStoreRepository(); + } + + RemoteSegmentStoreDirectoryFactory directoryFactory = new RemoteSegmentStoreDirectoryFactory( + () -> repositoriesService, + threadPool + ); + RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( + remoteStoreRepository, + indexUUID, + String.valueOf(shardId.id()) + ); + indexShard.syncSegmentsFromGivenRemoteSegmentStore(true, sourceRemoteDirectory, primaryTerm, commitGeneration); + final Store store = indexShard.store(); + if (indexShard.indexSettings.isRemoteTranslogStoreEnabled() == false) { + bootstrap(indexShard, store); + } else { + bootstrapForSnapshot(indexShard, store); + } + assert 
indexShard.shardRouting.primary() : "only primary shards can recover from store"; + writeEmptyRetentionLeasesFile(indexShard); + indexShard.recoveryState().getIndex().setFileDetailsComplete(); + if (indexShard.indexSettings.isRemoteStoreEnabled()) { + indexShard.openEngineAndSkipTranslogRecoveryFromSnapshot(); + } else { + indexShard.openEngineAndRecoverFromTranslog(); + } + indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); + indexShard.finalizeRecovery(); + indexShard.postRecovery("restore done"); + + listener.onResponse(true); + } else { + listener.onResponse(false); + } + } catch (Exception e) { + listener.onFailure(e); + } + } + private boolean canRecover(IndexShard indexShard) { if (indexShard.state() == IndexShardState.CLOSED) { // got closed on us, just ignore this recovery @@ -597,10 +673,18 @@ private void restore( } final ActionListener restoreListener = ActionListener.wrap(v -> { final Store store = indexShard.store(); - bootstrap(indexShard, store); + if (indexShard.indexSettings.isRemoteTranslogStoreEnabled() == false) { + bootstrap(indexShard, store); + } else { + bootstrapForSnapshot(indexShard, store); + } assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; writeEmptyRetentionLeasesFile(indexShard); - indexShard.openEngineAndRecoverFromTranslog(); + if (indexShard.indexSettings.isRemoteStoreEnabled()) { + indexShard.openEngineAndSkipTranslogRecoveryFromSnapshot(); + } else { + indexShard.openEngineAndRecoverFromTranslog(); + } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); indexShard.postRecovery("restore done"); @@ -644,6 +728,21 @@ private void restore( } } + private void bootstrapForSnapshot(final IndexShard indexShard, final Store store) throws IOException { + store.bootstrapNewHistory(); + final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), + shardId, + localCheckpoint, + indexShard.getPendingPrimaryTerm(), + translogUUID, + FileChannel::open + ); + } + private void bootstrap(final IndexShard indexShard, final Store store) throws IOException { store.bootstrapNewHistory(); final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 15c6fbea99148..ac129aca8baf7 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -27,6 +27,7 @@ import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.threadpool.ThreadPool; import java.io.FileNotFoundException; import java.io.IOException; @@ -75,6 +76,8 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final RemoteStoreLockManager mdLockManager; + private final ThreadPool threadPool; + /** * To prevent explosion of refresh metadata files, we replace refresh files 
for the given primary term and generation * This is achieved by uploading refresh metadata file with the same UUID suffix. @@ -96,15 +99,23 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + /** + * AtomicBoolean that ensures only one staleCommitDeletion activity is scheduled at a time. + * Visible for testing + */ + protected final AtomicBoolean canDeleteStaleCommits = new AtomicBoolean(true); + public RemoteSegmentStoreDirectory( RemoteDirectory remoteDataDirectory, RemoteDirectory remoteMetadataDirectory, - RemoteStoreLockManager mdLockManager + RemoteStoreLockManager mdLockManager, + ThreadPool threadPool ) throws IOException { super(remoteDataDirectory); this.remoteDataDirectory = remoteDataDirectory; this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; + this.threadPool = threadPool; init(); } @@ -126,6 +137,24 @@ public RemoteSegmentMetadata init() throws IOException { return remoteSegmentMetadata; } + /** + * Initializes the cache to a specific commit which keeps track of all the segment files uploaded to the + * remote segment store. + * This is currently used to restore snapshots, where we want to copy segment files from a given commit. + * TODO: check if we can return a read-only RemoteSegmentStoreDirectory object from here. + * @throws IOException if there were any failures in reading the metadata file + */ + public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long commitGeneration) throws IOException { + String metadataFile = getMetadataFileForCommit(primaryTerm, commitGeneration); + RemoteSegmentMetadata remoteSegmentMetadata = readMetadataFile(metadataFile); + if (remoteSegmentMetadata != null) { + this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); + } else { + this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(); + } + return remoteSegmentMetadata; + } + /** * Read the latest metadata file to get the list of segments uploaded to the remote segment store. * We upload a metadata file per refresh, but it is not unique per refresh. Refresh metadata file is unique for a given commit. @@ -485,6 +514,7 @@ public void uploadMetadata( new RemoteSegmentMetadata( RemoteSegmentMetadata.fromMapOfStrings(uploadedSegments), segmentInfoSnapshotByteArray, + primaryTerm, segmentInfosSnapshot.getGeneration() ) ); @@ -555,7 +585,7 @@ public Map getSegmentsUploadedToRemoteStore(lon * @param lastNMetadataFilesToKeep number of metadata files to keep * @throws IOException in case of I/O error while reading from / writing to remote segment store */ - public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException { + private void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException { Collection<String> metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX); List<String> sortedMetadataFileList = metadataFiles.stream().sorted(METADATA_FILENAME_COMPARATOR).collect(Collectors.toList()); if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) { @@ -637,6 +667,33 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException } } + /** + * Delete stale segment and metadata files asynchronously. + * This method calls {@link RemoteSegmentStoreDirectory#deleteStaleSegments(int)} in an async manner.
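The implementation that follows is a single-flight guard; a minimal, generic sketch of the same pattern (field and method names here are made up for illustration):

    // Only one async cleanup may be in flight at a time; the flag is restored both
    // when the task completes and when scheduling itself fails.
    private final AtomicBoolean cleanupPermitted = new AtomicBoolean(true);   // hypothetical field

    void scheduleCleanup(ExecutorService executor, Runnable cleanup) {        // hypothetical method
        if (cleanupPermitted.compareAndSet(true, false)) {
            try {
                executor.execute(() -> {
                    try {
                        cleanup.run();
                    } finally {
                        cleanupPermitted.set(true);
                    }
                });
            } catch (Exception e) {
                cleanupPermitted.set(true);   // e.g. rejected execution: allow a later retry
            }
        }
    }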
+ * @param lastNMetadataFilesToKeep number of metadata files to keep + */ + public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) { + if (canDeleteStaleCommits.compareAndSet(true, false)) { + try { + threadPool.executor(ThreadPool.Names.REMOTE_PURGE).execute(() -> { + try { + deleteStaleSegments(lastNMetadataFilesToKeep); + } catch (Exception e) { + logger.info( + "Exception while deleting stale commits from remote segment store, will retry delete post next commit", + e + ); + } finally { + canDeleteStaleCommits.set(true); + } + }); + } catch (Exception e) { + logger.info("Exception occurred while scheduling deleteStaleCommits", e); + canDeleteStaleCommits.set(true); + } + } + } + /* Tries to delete shard level directory if it is empty Return true if it deleted it successfully @@ -661,7 +718,7 @@ private boolean deleteIfEmpty() throws IOException { } public void close() throws IOException { - deleteStaleSegments(0); + deleteStaleSegmentsAsync(0); deleteIfEmpty(); } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 388f80ea3e480..3bec84f287ce4 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -20,6 +20,7 @@ import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.util.function.Supplier; @@ -34,8 +35,11 @@ public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.Dire private final Supplier repositoriesService; - public RemoteSegmentStoreDirectoryFactory(Supplier repositoriesService) { + private final ThreadPool threadPool; + + public RemoteSegmentStoreDirectoryFactory(Supplier repositoriesService, ThreadPool threadPool) { this.repositoriesService = repositoriesService; + this.threadPool = threadPool; } @Override @@ -43,6 +47,11 @@ public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throw String repositoryName = indexSettings.getRemoteStoreRepository(); String indexUUID = indexSettings.getIndex().getUUID(); String shardId = String.valueOf(path.getShardId().getId()); + + return newDirectory(repositoryName, indexUUID, shardId); + } + + public Directory newDirectory(String repositoryName, String indexUUID, String shardId) throws IOException { try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobPath commonBlobPath = ((BlobStoreRepository) repository).basePath(); @@ -57,7 +66,7 @@ public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throw shardId ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java 
b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java index 5ed1f5e0ee21b..f23e057196096 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java @@ -11,7 +11,6 @@ import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.cache.RemovalReason; import org.opensearch.index.store.remote.utils.cache.SegmentedCache; -import org.opensearch.index.store.remote.file.OnDemandBlockSnapshotIndexInput; import java.nio.file.Files; import java.nio.file.Path; @@ -39,24 +38,11 @@ public class FileCacheFactory { public static FileCache createConcurrentLRUFileCache(long capacity, CircuitBreaker circuitBreaker) { - return createFileCache(createDefaultBuilder().capacity(capacity).build(), circuitBreaker); + return new FileCache(createDefaultBuilder().capacity(capacity).build(), circuitBreaker); } public static FileCache createConcurrentLRUFileCache(long capacity, int concurrencyLevel, CircuitBreaker circuitBreaker) { - return createFileCache(createDefaultBuilder().capacity(capacity).concurrencyLevel(concurrencyLevel).build(), circuitBreaker); - } - - private static FileCache createFileCache(SegmentedCache segmentedCache, CircuitBreaker circuitBreaker) { - /* - * Since OnDemandBlockSnapshotIndexInput.Builder.DEFAULT_BLOCK_SIZE is not overridden then it will be upper bound for max IndexInput - * size on disk. A single IndexInput size should always be more than a single segment in segmented cache. A FileCache capacity might - * be defined with large capacity (> IndexInput block size) but due to segmentation and concurrency factor, that capacity is - * distributed equally across segments. 
- */ - if (segmentedCache.getPerSegmentCapacity() <= OnDemandBlockSnapshotIndexInput.Builder.DEFAULT_BLOCK_SIZE) { - throw new IllegalStateException("FileSystem Cache per segment capacity is less than single IndexInput default block size"); - } - return new FileCache(segmentedCache, circuitBreaker); + return new FileCache(createDefaultBuilder().capacity(capacity).concurrencyLevel(concurrencyLevel).build(), circuitBreaker); } private static SegmentedCache.Builder createDefaultBuilder() { diff --git a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java index 2a84fbfb89c93..9a479346ff711 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java +++ b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java @@ -38,16 +38,19 @@ public class RemoteSegmentMetadata { private final byte[] segmentInfosBytes; + private final long primaryTerm; private final long generation; public RemoteSegmentMetadata( Map metadata, byte[] segmentInfosBytes, + long primaryTerm, long generation ) { this.metadata = metadata; this.segmentInfosBytes = segmentInfosBytes; this.generation = generation; + this.primaryTerm = primaryTerm; } /** @@ -66,6 +69,10 @@ public long getGeneration() { return generation; } + public long getPrimaryTerm() { + return primaryTerm; + } + /** * Generate {@code Map} from {@link RemoteSegmentMetadata} * @return {@code Map} @@ -93,6 +100,7 @@ public static Map f public void write(IndexOutput out) throws IOException { out.writeMapOfStrings(toMapOfStrings()); out.writeLong(generation); + out.writeLong(primaryTerm); out.writeLong(segmentInfosBytes.length); out.writeBytes(segmentInfosBytes, segmentInfosBytes.length); } @@ -100,9 +108,10 @@ public void write(IndexOutput out) throws IOException { public static RemoteSegmentMetadata read(IndexInput indexInput) throws IOException { Map metadata = indexInput.readMapOfStrings(); long generation = indexInput.readLong(); + long primaryTerm = indexInput.readLong(); int byteArraySize = (int) indexInput.readLong(); byte[] segmentInfosBytes = new byte[byteArraySize]; indexInput.readBytes(segmentInfosBytes, 0, byteArraySize); - return new RemoteSegmentMetadata(RemoteSegmentMetadata.fromMapOfStrings(metadata), segmentInfosBytes, generation); + return new RemoteSegmentMetadata(RemoteSegmentMetadata.fromMapOfStrings(metadata), segmentInfosBytes, primaryTerm, generation); } } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 6ebb1bf7d2252..04057b581e8d9 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -423,6 +423,20 @@ private void deleteStaleRemotePrimaryTermsAndMetadataFiles() { } } + public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool) throws IOException { + assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; + BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; + FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId); + TranslogTransferManager translogTransferManager = buildTranslogTransferManager( + blobStoreRepository, + threadPool, + shardId, + fileTransferTracker + ); + // clean up all 
remote translog files + translogTransferManager.deleteTranslogFiles(); + } + protected void onDelete() { if (primaryModeSupplier.getAsBoolean() == false) { logger.trace("skipped delete translog"); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 58aca00d2e9d3..f6405bc9b5c82 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -361,6 +361,11 @@ public void onFailure(Exception e) { }); } + public void deleteTranslogFiles() throws IOException { + transferService.delete(remoteMetadataTransferPath); + transferService.delete(remoteDataTransferPath); + } + /** * Deletes list of translog files asynchronously using the {@code REMOTE_PURGE} threadpool. * diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 4a0fab82f9adc..e4b251914fa0b 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -107,6 +107,7 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.CLOSED; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.FAILURE; @@ -544,7 +545,19 @@ private void createIndices(final ClusterState state) { AllocatedIndex indexService = null; try { - indexService = indicesService.createIndex(indexMetadata, builtInIndexListener, true); + List updatedIndexEventListeners = new ArrayList<>(builtInIndexListener); + if (entry.getValue().size() > 0 + && entry.getValue().get(0).recoverySource().getType() == Type.SNAPSHOT + && indexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) { + final IndexEventListener refreshListenerAfterSnapshotRestore = new IndexEventListener() { + @Override + public void afterIndexShardStarted(IndexShard indexShard) { + indexShard.refresh("refresh to upload metadata to remote store"); + } + }; + updatedIndexEventListeners.add(refreshListenerAfterSnapshotRestore); + } + indexService = indicesService.createIndex(indexMetadata, updatedIndexEventListeners, true); if (indexService.updateMapping(null, indexMetadata) && sendRefreshMapping) { nodeMappingRefreshAction.nodeMappingRefresh( state.nodes().getClusterManagerNode(), diff --git a/server/src/main/java/org/opensearch/ingest/CompoundProcessor.java b/server/src/main/java/org/opensearch/ingest/CompoundProcessor.java index 8cdbc487dc137..a5f4870029e87 100644 --- a/server/src/main/java/org/opensearch/ingest/CompoundProcessor.java +++ b/server/src/main/java/org/opensearch/ingest/CompoundProcessor.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.metrics.OperationMetrics; import java.util.ArrayList; import java.util.Arrays; @@ -60,7 +61,7 @@ public class CompoundProcessor implements Processor 
{ private final boolean ignoreFailure; private final List processors; private final List onFailureProcessors; - private final List> processorsWithMetrics; + private final List> processorsWithMetrics; private final LongSupplier relativeTimeProvider; CompoundProcessor(LongSupplier relativeTimeProvider, Processor... processor) { @@ -87,10 +88,10 @@ public CompoundProcessor(boolean ignoreFailure, List processors, List this.onFailureProcessors = onFailureProcessors; this.relativeTimeProvider = relativeTimeProvider; this.processorsWithMetrics = new ArrayList<>(processors.size()); - processors.forEach(p -> processorsWithMetrics.add(new Tuple<>(p, new IngestMetric()))); + processors.forEach(p -> processorsWithMetrics.add(new Tuple<>(p, new OperationMetrics()))); } - List> getProcessorsWithMetrics() { + List> getProcessorsWithMetrics() { return processorsWithMetrics; } @@ -155,17 +156,17 @@ void innerExecute(int currentProcessor, IngestDocument ingestDocument, BiConsume return; } - Tuple processorWithMetric = processorsWithMetrics.get(currentProcessor); + Tuple processorWithMetric = processorsWithMetrics.get(currentProcessor); final Processor processor = processorWithMetric.v1(); - final IngestMetric metric = processorWithMetric.v2(); + final OperationMetrics metric = processorWithMetric.v2(); final long startTimeInNanos = relativeTimeProvider.getAsLong(); - metric.preIngest(); + metric.before(); processor.execute(ingestDocument, (result, e) -> { long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeInNanos); - metric.postIngest(ingestTimeInMillis); + metric.after(ingestTimeInMillis); if (e != null) { - metric.ingestFailed(); + metric.failed(); if (ignoreFailure) { innerExecute(currentProcessor + 1, ingestDocument, handler); } else { diff --git a/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java b/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java index 591a71fd72b8f..8bf489805f7ca 100644 --- a/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java +++ b/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest; +import org.opensearch.common.metrics.OperationMetrics; import org.opensearch.script.IngestConditionalScript; import org.opensearch.script.Script; import org.opensearch.script.ScriptException; @@ -66,7 +67,7 @@ public class ConditionalProcessor extends AbstractProcessor implements WrappingP private final Script condition; private final ScriptService scriptService; private final Processor processor; - private final IngestMetric metric; + private final OperationMetrics metric; private final LongSupplier relativeTimeProvider; private final IngestConditionalScript precompiledConditionScript; @@ -86,7 +87,7 @@ public class ConditionalProcessor extends AbstractProcessor implements WrappingP this.condition = script; this.scriptService = scriptService; this.processor = processor; - this.metric = new IngestMetric(); + this.metric = new OperationMetrics(); this.relativeTimeProvider = relativeTimeProvider; try { @@ -114,12 +115,12 @@ public void execute(IngestDocument ingestDocument, BiConsumer { long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeInNanos); - metric.postIngest(ingestTimeInMillis); + metric.after(ingestTimeInMillis); if (e != null) { - metric.ingestFailed(); + metric.failed(); handler.accept(null, e); } else { handler.accept(result, null); @@ -148,7 +149,7 @@ public Processor 
getInnerProcessor() { return processor; } - IngestMetric getMetric() { + OperationMetrics getMetric() { return metric; } diff --git a/server/src/main/java/org/opensearch/ingest/IngestMetric.java b/server/src/main/java/org/opensearch/ingest/IngestMetric.java deleted file mode 100644 index 2d4a1dc9cfdee..0000000000000 --- a/server/src/main/java/org/opensearch/ingest/IngestMetric.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.ingest; - -import org.opensearch.common.metrics.CounterMetric; -import org.opensearch.common.metrics.MeanMetric; - -import java.util.concurrent.atomic.AtomicLong; - -/** - *

<p>Metrics to measure ingest actions. - * <p>
This counts measure documents and timings for a given scope. - * The scope is determined by the calling code. For example you can use this class to count all documents across all pipeline, - * or you can use this class to count documents for a given pipeline or a specific processor. - * This class does not make assumptions about it's given scope. - * - * @opensearch.internal - */ -class IngestMetric { - - /** - * The time it takes to complete the measured item. - */ - private final MeanMetric ingestTime = new MeanMetric(); - /** - * The current count of things being measure. Should most likely ever be 0 or 1. - * Useful when aggregating multiple metrics to see how many things are in flight. - */ - private final AtomicLong ingestCurrent = new AtomicLong(); - /** - * The ever increasing count of things being measured - */ - private final CounterMetric ingestCount = new CounterMetric(); - /** - * The only increasing count of failures - */ - private final CounterMetric ingestFailed = new CounterMetric(); - - /** - * Call this prior to the ingest action. - */ - void preIngest() { - ingestCurrent.incrementAndGet(); - } - - /** - * Call this after the performing the ingest action, even if the action failed. - * @param ingestTimeInMillis The time it took to perform the action. - */ - void postIngest(long ingestTimeInMillis) { - ingestCurrent.decrementAndGet(); - ingestTime.inc(ingestTimeInMillis); - ingestCount.inc(); - } - - /** - * Call this if the ingest action failed. - */ - void ingestFailed() { - ingestFailed.inc(); - } - - /** - *

<p>Add two sets of metrics together. - * <p>
Note - this method does not add the current count values. - * The current count value is ephemeral and requires a increase/decrease operation pairs to keep the value correct. - * - * @param metrics The metric to add. - */ - void add(IngestMetric metrics) { - ingestCount.inc(metrics.ingestCount.count()); - ingestTime.inc(metrics.ingestTime.sum()); - ingestFailed.inc(metrics.ingestFailed.count()); - } - - /** - * Creates a serializable representation for these metrics. - */ - IngestStats.Stats createStats() { - return new IngestStats.Stats(ingestCount.count(), ingestTime.sum(), ingestCurrent.get(), ingestFailed.count()); - } -} diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index b9785d9ec036f..0984046ca3077 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -60,6 +60,7 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.metrics.OperationMetrics; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -114,7 +115,7 @@ public class IngestService implements ClusterStateApplier, ReportingService pipelines = Collections.emptyMap(); private final ThreadPool threadPool; - private final IngestMetric totalMetrics = new IngestMetric(); + private final OperationMetrics totalMetrics = new OperationMetrics(); private final List> ingestClusterStateListeners = new CopyOnWriteArrayList<>(); private final ClusterManagerTaskThrottler.ThrottlingKey putPipelineTaskKey; private final ClusterManagerTaskThrottler.ThrottlingKey deletePipelineTaskKey; @@ -440,17 +441,17 @@ Map pipelines() { * Recursive method to obtain all of the non-failure processors for given compoundProcessor. Since conditionals are implemented as * wrappers to the actual processor, always prefer the actual processor's metric over the conditional processor's metric. * @param compoundProcessor The compound processor to start walking the non-failure processors - * @param processorMetrics The list of {@link Processor} {@link IngestMetric} tuples. + * @param processorMetrics The list of {@link Processor} {@link OperationMetrics} tuples. 
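For readers following the IngestMetric-to-OperationMetrics migration in this file, a sketch of the replacement's lifecycle as this patch uses it (before()/after(millis)/failed()/createStats(), imported from org.opensearch.common.metrics):

    OperationMetrics metric = new OperationMetrics();
    long start = System.nanoTime();
    metric.before();                                  // one more operation in flight
    try {
        // ... execute the processor or pipeline ...
    } catch (Exception e) {
        metric.failed();                              // count the failure
        throw e;
    } finally {
        metric.after(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
    }
    OperationStats stats = metric.createStats();      // serializable snapshot for the stats API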
* @return the processorMetrics for all non-failure processor that belong to the original compoundProcessor */ - private static List> getProcessorMetrics( + private static List> getProcessorMetrics( CompoundProcessor compoundProcessor, - List> processorMetrics + List> processorMetrics ) { // only surface the top level non-failure processors, on-failure processor times will be included in the top level non-failure - for (Tuple processorWithMetric : compoundProcessor.getProcessorsWithMetrics()) { + for (Tuple processorWithMetric : compoundProcessor.getProcessorsWithMetrics()) { Processor processor = processorWithMetric.v1(); - IngestMetric metric = processorWithMetric.v2(); + OperationMetrics metric = processorWithMetric.v2(); if (processor instanceof CompoundProcessor) { getProcessorMetrics((CompoundProcessor) processor, processorMetrics); } else { @@ -614,7 +615,7 @@ private void executePipelines( if (Objects.equals(originalIndex, newIndex) == false) { if (hasFinalPipeline && it.hasNext() == false) { - totalMetrics.ingestFailed(); + totalMetrics.failed(); onFailure.accept( slot, new IllegalStateException("final pipeline [" + pipelineId + "] can't change the target index") @@ -680,11 +681,11 @@ public IngestStats stats() { Pipeline pipeline = holder.pipeline; CompoundProcessor rootProcessor = pipeline.getCompoundProcessor(); statsBuilder.addPipelineMetrics(id, pipeline.getMetrics()); - List> processorMetrics = new ArrayList<>(); + List> processorMetrics = new ArrayList<>(); getProcessorMetrics(rootProcessor, processorMetrics); processorMetrics.forEach(t -> { Processor processor = t.v1(); - IngestMetric processorMetric = t.v2(); + OperationMetrics processorMetric = t.v2(); statsBuilder.addProcessorMetrics(id, getProcessorName(processor), processor.getType(), processorMetric); }); }); @@ -739,7 +740,7 @@ private void innerExecute( long startTimeInNanos = System.nanoTime(); // the pipeline specific stat holder may not exist and that is fine: // (e.g. the pipeline may have been removed while we're ingesting a document - totalMetrics.preIngest(); + totalMetrics.before(); String index = indexRequest.index(); String id = indexRequest.id(); String routing = indexRequest.routing(); @@ -749,9 +750,9 @@ private void innerExecute( IngestDocument ingestDocument = new IngestDocument(index, id, routing, version, versionType, sourceAsMap); ingestDocument.executePipeline(pipeline, (result, e) -> { long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); - totalMetrics.postIngest(ingestTimeInMillis); + totalMetrics.after(ingestTimeInMillis); if (e != null) { - totalMetrics.ingestFailed(); + totalMetrics.failed(); handler.accept(e); } else if (result == null) { itemDroppedHandler.accept(slot); @@ -835,22 +836,22 @@ void innerUpdatePipelines(IngestMetadata newIngestMetadata) { } Pipeline oldPipeline = previous.pipeline; newPipeline.getMetrics().add(oldPipeline.getMetrics()); - List> oldPerProcessMetrics = new ArrayList<>(); - List> newPerProcessMetrics = new ArrayList<>(); + List> oldPerProcessMetrics = new ArrayList<>(); + List> newPerProcessMetrics = new ArrayList<>(); getProcessorMetrics(oldPipeline.getCompoundProcessor(), oldPerProcessMetrics); getProcessorMetrics(newPipeline.getCompoundProcessor(), newPerProcessMetrics); // Best attempt to populate new processor metrics using a parallel array of the old metrics. This is not ideal since // the per processor metrics may get reset when the arrays don't match. 
However, to get to an ideal model, unique and // consistent id's per processor and/or semantic equals for each processor will be needed. if (newPerProcessMetrics.size() == oldPerProcessMetrics.size()) { - Iterator> oldMetricsIterator = oldPerProcessMetrics.iterator(); - for (Tuple compositeMetric : newPerProcessMetrics) { + Iterator> oldMetricsIterator = oldPerProcessMetrics.iterator(); + for (Tuple compositeMetric : newPerProcessMetrics) { String type = compositeMetric.v1().getType(); - IngestMetric metric = compositeMetric.v2(); + OperationMetrics metric = compositeMetric.v2(); if (oldMetricsIterator.hasNext()) { - Tuple oldCompositeMetric = oldMetricsIterator.next(); + Tuple oldCompositeMetric = oldMetricsIterator.next(); String oldType = oldCompositeMetric.v1().getType(); - IngestMetric oldMetric = oldCompositeMetric.v2(); + OperationMetrics oldMetric = oldCompositeMetric.v2(); if (type.equals(oldType)) { metric.add(oldMetric); } diff --git a/server/src/main/java/org/opensearch/ingest/IngestStats.java b/server/src/main/java/org/opensearch/ingest/IngestStats.java index 0f6209d1c005e..ac06d779bdf2d 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestStats.java +++ b/server/src/main/java/org/opensearch/ingest/IngestStats.java @@ -35,7 +35,8 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.metrics.OperationMetrics; +import org.opensearch.common.metrics.OperationStats; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -46,15 +47,14 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.TimeUnit; /** - * Stats for an ingest processor pipeline + * OperationStats for an ingest processor pipeline * * @opensearch.internal */ public class IngestStats implements Writeable, ToXContentFragment { - private final Stats totalStats; + private final OperationStats totalStats; private final List pipelineStats; private final Map> processorStats; @@ -64,7 +64,7 @@ public class IngestStats implements Writeable, ToXContentFragment { * @param pipelineStats - The stats for a given ingest pipeline. * @param processorStats - The per-processor stats for a given pipeline. A map keyed by the pipeline identifier. 
diff --git a/server/src/main/java/org/opensearch/ingest/IngestStats.java b/server/src/main/java/org/opensearch/ingest/IngestStats.java
index 0f6209d1c005e..ac06d779bdf2d 100644
--- a/server/src/main/java/org/opensearch/ingest/IngestStats.java
+++ b/server/src/main/java/org/opensearch/ingest/IngestStats.java
@@ -35,7 +35,8 @@
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.common.io.stream.Writeable;
-import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.metrics.OperationMetrics;
+import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
@@ -46,15 +47,14 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
-import java.util.concurrent.TimeUnit;

 /**
- * Stats for an ingest processor pipeline
+ * OperationStats for an ingest processor pipeline
  *
  * @opensearch.internal
  */
 public class IngestStats implements Writeable, ToXContentFragment {
-    private final Stats totalStats;
+    private final OperationStats totalStats;
     private final List<PipelineStat> pipelineStats;
     private final Map<String, List<ProcessorStat>> processorStats;
@@ -64,7 +64,7 @@ public class IngestStats implements Writeable, ToXContentFragment {
     * @param pipelineStats - The stats for a given ingest pipeline.
     * @param processorStats - The per-processor stats for a given pipeline. A map keyed by the pipeline identifier.
     */
-    public IngestStats(Stats totalStats, List<PipelineStat> pipelineStats, Map<String, List<ProcessorStat>> processorStats) {
+    public IngestStats(OperationStats totalStats, List<PipelineStat> pipelineStats, Map<String, List<ProcessorStat>> processorStats) {
        this.totalStats = totalStats;
        this.pipelineStats = pipelineStats;
        this.processorStats = processorStats;
@@ -74,13 +74,13 @@ public IngestStats(Stats totalStats, List<PipelineStat> pipelineStats, Map<String, List<ProcessorStat>> processorStats) {
    /**
     * Read from a stream.
     */
    public IngestStats(StreamInput in) throws IOException {
-        this.totalStats = new Stats(in);
+        this.totalStats = new OperationStats(in);
        int size = in.readVInt();
        this.pipelineStats = new ArrayList<>(size);
        this.processorStats = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            String pipelineId = in.readString();
-            Stats pipelineStat = new Stats(in);
+            OperationStats pipelineStat = new OperationStats(in);
            this.pipelineStats.add(new PipelineStat(pipelineId, pipelineStat));
            int processorsSize = in.readVInt();
            List<ProcessorStat> processorStatsPerPipeline = new ArrayList<>(processorsSize);
@@ -88,7 +88,7 @@ public IngestStats(StreamInput in) throws IOException {
            String processorName = in.readString();
            String processorType = "_NOT_AVAILABLE";
            processorType = in.readString();
-            Stats processorStat = new Stats(in);
+            OperationStats processorStat = new OperationStats(in);
            processorStatsPerPipeline.add(new ProcessorStat(processorName, processorType, processorStat));
        }
        this.processorStats.put(pipelineId, processorStatsPerPipeline);
@@ -148,7 +148,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
        return builder;
    }

-    public Stats getTotalStats() {
+    public OperationStats getTotalStats() {
        return totalStats;
    }
@@ -176,115 +176,24 @@ public int hashCode() {
    }

    /**
-     * The ingest statistics.
-     *
-     * @opensearch.internal
-     */
-    public static class Stats implements Writeable, ToXContentFragment {
-
-        private final long ingestCount;
-        private final long ingestTimeInMillis;
-        private final long ingestCurrent;
-        private final long ingestFailedCount;
-
-        public Stats(long ingestCount, long ingestTimeInMillis, long ingestCurrent, long ingestFailedCount) {
-            this.ingestCount = ingestCount;
-            this.ingestTimeInMillis = ingestTimeInMillis;
-            this.ingestCurrent = ingestCurrent;
-            this.ingestFailedCount = ingestFailedCount;
-        }
-
-        /**
-         * Read from a stream.
-         */
-        public Stats(StreamInput in) throws IOException {
-            ingestCount = in.readVLong();
-            ingestTimeInMillis = in.readVLong();
-            ingestCurrent = in.readVLong();
-            ingestFailedCount = in.readVLong();
-        }
-
-        @Override
-        public void writeTo(StreamOutput out) throws IOException {
-            out.writeVLong(ingestCount);
-            out.writeVLong(ingestTimeInMillis);
-            out.writeVLong(ingestCurrent);
-            out.writeVLong(ingestFailedCount);
-        }
-
-        /**
-         * @return The total number of executed ingest preprocessing operations.
-         */
-        public long getIngestCount() {
-            return ingestCount;
-        }
-
-        /**
-         * @return The total time spent of ingest preprocessing in millis.
-         */
-        public long getIngestTimeInMillis() {
-            return ingestTimeInMillis;
-        }
-
-        /**
-         * @return The total number of ingest preprocessing operations currently executing.
-         */
-        public long getIngestCurrent() {
-            return ingestCurrent;
-        }
-
-        /**
-         * @return The total number of ingest preprocessing operations that have failed.
-         */
-        public long getIngestFailedCount() {
-            return ingestFailedCount;
-        }
-
-        @Override
-        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-            builder.field("count", ingestCount);
-            builder.humanReadableField("time_in_millis", "time", new TimeValue(ingestTimeInMillis, TimeUnit.MILLISECONDS));
-            builder.field("current", ingestCurrent);
-            builder.field("failed", ingestFailedCount);
-            return builder;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) return true;
-            if (o == null || getClass() != o.getClass()) return false;
-            IngestStats.Stats that = (IngestStats.Stats) o;
-            return Objects.equals(ingestCount, that.ingestCount)
-                && Objects.equals(ingestTimeInMillis, that.ingestTimeInMillis)
-                && Objects.equals(ingestFailedCount, that.ingestFailedCount)
-                && Objects.equals(ingestCurrent, that.ingestCurrent);
-        }
-
-        @Override
-        public int hashCode() {
-            return Objects.hash(ingestCount, ingestTimeInMillis, ingestFailedCount, ingestCurrent);
-        }
-    }
-
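One thing the deletion above should not obscure: the REST shape is unchanged. The removed Stats.toXContent emitted four fields, and the drop-in OperationStats is assumed to keep the same layout, since every call site in this diff swaps the Java type without touching field names:

    // { "count": <long>, "time_in_millis": <long>, "current": <long>, "failed": <long> }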
    /**
-     * Easy conversion from scoped {@link IngestMetric} objects to a serializable Stats objects
+     * Easy conversion from scoped {@link OperationMetrics} objects to a serializable OperationStats objects
     */
    static class Builder {
-        private Stats totalStats;
+        private OperationStats totalStats;
        private List<PipelineStat> pipelineStats = new ArrayList<>();
        private Map<String, List<ProcessorStat>> processorStats = new HashMap<>();

-        Builder addTotalMetrics(IngestMetric totalMetric) {
+        Builder addTotalMetrics(OperationMetrics totalMetric) {
            this.totalStats = totalMetric.createStats();
            return this;
        }

-        Builder addPipelineMetrics(String pipelineId, IngestMetric pipelineMetric) {
+        Builder addPipelineMetrics(String pipelineId, OperationMetrics pipelineMetric) {
            this.pipelineStats.add(new PipelineStat(pipelineId, pipelineMetric.createStats()));
            return this;
        }

-        Builder addProcessorMetrics(String pipelineId, String processorName, String processorType, IngestMetric metric) {
+        Builder addProcessorMetrics(String pipelineId, String processorName, String processorType, OperationMetrics metric) {
            this.processorStats.computeIfAbsent(pipelineId, k -> new ArrayList<>())
                .add(new ProcessorStat(processorName, processorType, metric.createStats()));
            return this;
@@ -300,9 +209,9 @@ IngestStats build() {
     */
    public static class PipelineStat {
        private final String pipelineId;
-        private final Stats stats;
+        private final OperationStats stats;

-        public PipelineStat(String pipelineId, Stats stats) {
+        public PipelineStat(String pipelineId, OperationStats stats) {
            this.pipelineId = pipelineId;
            this.stats = stats;
        }
@@ -311,7 +220,7 @@ public String getPipelineId() {
            return pipelineId;
        }

-        public Stats getStats() {
+        public OperationStats getStats() {
            return stats;
        }
@@ -335,9 +244,9 @@ public int hashCode() {
    public static class ProcessorStat {
        private final String name;
        private final String type;
-        private final Stats stats;
+        private final OperationStats stats;

-        public ProcessorStat(String name, String type, Stats stats) {
+        public ProcessorStat(String name, String type, OperationStats stats) {
            this.name = name;
            this.type = type;
            this.stats = stats;
@@ -351,7 +260,7 @@ public String getType() {
            return type;
        }

-        public Stats getStats() {
+        public OperationStats getStats() {
            return stats;
        }
diff --git a/server/src/main/java/org/opensearch/ingest/Pipeline.java b/server/src/main/java/org/opensearch/ingest/Pipeline.java
index 9b3725fd65d9d..766fb9cd66777 100644
--- a/server/src/main/java/org/opensearch/ingest/Pipeline.java
+++
b/server/src/main/java/org/opensearch/ingest/Pipeline.java @@ -43,6 +43,7 @@ import java.util.function.BiConsumer; import java.util.function.LongSupplier; +import org.opensearch.common.metrics.OperationMetrics; import org.opensearch.script.ScriptService; /** @@ -63,7 +64,7 @@ public final class Pipeline { @Nullable private final Integer version; private final CompoundProcessor compoundProcessor; - private final IngestMetric metrics; + private final OperationMetrics metrics; private final LongSupplier relativeTimeProvider; public Pipeline(String id, @Nullable String description, @Nullable Integer version, CompoundProcessor compoundProcessor) { @@ -82,7 +83,7 @@ public Pipeline(String id, @Nullable String description, @Nullable Integer versi this.description = description; this.compoundProcessor = compoundProcessor; this.version = version; - this.metrics = new IngestMetric(); + this.metrics = new OperationMetrics(); this.relativeTimeProvider = relativeTimeProvider; } @@ -129,12 +130,12 @@ public static Pipeline create( */ public void execute(IngestDocument ingestDocument, BiConsumer handler) { final long startTimeInNanos = relativeTimeProvider.getAsLong(); - metrics.preIngest(); + metrics.before(); compoundProcessor.execute(ingestDocument, (result, e) -> { long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeInNanos); - metrics.postIngest(ingestTimeInMillis); + metrics.after(ingestTimeInMillis); if (e != null) { - metrics.ingestFailed(); + metrics.failed(); } handler.accept(result, e); }); @@ -198,7 +199,7 @@ public List flattenAllProcessors() { /** * The metrics associated with this pipeline. */ - public IngestMetric getMetrics() { + public OperationMetrics getMetrics() { return metrics; } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 808c054de4969..aecf5659de7fe 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -56,6 +56,8 @@ import org.opensearch.monitor.fs.FsProbe; import org.opensearch.plugins.ExtensionAwarePlugin; import org.opensearch.plugins.SearchPipelinePlugin; +import org.opensearch.telemetry.tracing.NoopTracerFactory; +import org.opensearch.telemetry.tracing.TracerFactory; import org.opensearch.search.backpressure.SearchBackpressureService; import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; import org.opensearch.search.pipeline.SearchPipelineService; @@ -65,6 +67,8 @@ import org.opensearch.tasks.consumer.TopNSearchTasksLogger; import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; +import org.opensearch.telemetry.TelemetryModule; +import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.core.Assertions; import org.opensearch.Build; @@ -194,6 +198,7 @@ import org.opensearch.plugins.ScriptPlugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.plugins.SystemIndexPlugin; +import org.opensearch.plugins.TelemetryPlugin; import org.opensearch.repositories.RepositoriesModule; import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestController; @@ -254,6 +259,7 @@ import static java.util.stream.Collectors.toList; import static org.opensearch.common.util.FeatureFlags.SEARCH_PIPELINE; +import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import static 
org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; @@ -373,6 +379,7 @@ public static class DiscoverySettings { private final Collection pluginLifecycleComponents; private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; + private final TracerFactory tracerFactory; final NamedWriteableRegistry namedWriteableRegistry; private final AtomicReference runnableTaskListener; private FileCache fileCache; @@ -475,7 +482,7 @@ protected Node( for (ExtensionAwarePlugin extAwarePlugin : extensionAwarePlugins) { additionalSettings.addAll(extAwarePlugin.getExtensionSettings()); } - this.extensionsManager = new ExtensionsManager(initialEnvironment.extensionDir(), additionalSettings); + this.extensionsManager = new ExtensionsManager(additionalSettings); } else { this.extensionsManager = new NoopExtensionsManager(); } @@ -710,7 +717,8 @@ protected Node( clusterService.setRerouteService(rerouteService); final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( - repositoriesServiceReference::get + repositoriesServiceReference::get, + threadPool ); final IndicesService indicesService = new IndicesService( @@ -803,7 +811,8 @@ protected Node( circuitBreakerService, usageService, systemIndices, - identityService + identityService, + extensionsManager ); modules.add(actionModule); @@ -1020,6 +1029,16 @@ protected Node( searchModule.getIndexSearcherExecutor(threadPool) ); + if (FeatureFlags.isEnabled(TELEMETRY)) { + final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); + List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); + TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); + tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); + } else { + tracerFactory = new NoopTracerFactory(); + } + resourcesToClose.add(tracerFactory::close); + final List> tasksExecutors = pluginsService.filterPlugins(PersistentTaskPlugin.class) .stream() .map( @@ -1125,6 +1144,7 @@ protected Node( b.bind(FsHealthService.class).toInstance(fsHealthService); b.bind(SystemIndices.class).toInstance(systemIndices); b.bind(IdentityService.class).toInstance(identityService); + b.bind(TracerFactory.class).toInstance(this.tracerFactory); }); injector = modules.createInjector(); @@ -1306,7 +1326,6 @@ public Node start() throws NodeValidationException { assert clusterService.localNode().equals(localNodeFactory.getNode()) : "clusterService has a different local node than the factory provided"; transportService.acceptIncomingRequests(); - extensionsManager.initialize(); discovery.startInitialJoin(); final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings()); configureNodeAndClusterIdStateListener(clusterService); @@ -1481,6 +1500,9 @@ public synchronized void close() throws IOException { toClose.add(() -> stopWatch.stop().start("node_environment")); toClose.add(injector.getInstance(NodeEnvironment.class)); toClose.add(stopWatch::stop); + if (FeatureFlags.isEnabled(TELEMETRY)) { + toClose.add(injector.getInstance(TracerFactory.class)); + } if (logger.isTraceEnabled()) { toClose.add(() -> logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint())); diff --git a/server/src/main/java/org/opensearch/node/NodeService.java 
b/server/src/main/java/org/opensearch/node/NodeService.java index 9382746081c18..6f4fe1e083ad7 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -216,7 +216,8 @@ public NodeStats stats( boolean clusterManagerThrottling, boolean weightedRoutingStats, boolean fileCacheStats, - boolean taskCancellation + boolean taskCancellation, + boolean searchPipelineStats ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -243,7 +244,8 @@ public NodeStats stats( clusterManagerThrottling ? this.clusterService.getClusterManagerService().getThrottlingStats() : null, weightedRoutingStats ? WeightedRoutingStats.getInstance() : null, fileCacheStats && fileCache != null ? fileCache.fileCacheStats() : null, - taskCancellation ? this.taskCancellationMonitoringService.stats() : null + taskCancellation ? this.taskCancellationMonitoringService.stats() : null, + searchPipelineStats ? this.searchPipelineService.stats() : null ); } diff --git a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java index b8ceddecd3d20..3d76bab93a60c 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java @@ -9,6 +9,7 @@ package org.opensearch.plugins; import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchPhaseResultsProcessor; import org.opensearch.search.pipeline.SearchRequestProcessor; import org.opensearch.search.pipeline.SearchResponseProcessor; @@ -42,4 +43,15 @@ default Map> getRequestProcess default Map> getResponseProcessors(Processor.Parameters parameters) { return Collections.emptyMap(); } + + /** + * Returns additional search pipeline search phase results processor types added by this plugin. + * + * The key of the returned {@link Map} is the unique name for the processor which is specified + * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} + * to create the processor from a given pipeline configuration. + */ + default Map> getSearchPhaseResultsProcessors(Processor.Parameters parameters) { + return Collections.emptyMap(); + } } diff --git a/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java b/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java new file mode 100644 index 0000000000000..33dc9b7a0c843 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugins; + +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; + +import java.util.Optional; + +/** + * Plugin for extending telemetry related classes + */ +public interface TelemetryPlugin { + + Optional getTelemetry(TelemetrySettings settings); + + String getName(); + +} diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index 88e14a4dff3a0..b108e2da1ab04 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -45,6 +45,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.snapshots.SnapshotId; @@ -219,6 +220,15 @@ public void restoreShard( in.restoreShard(store, snapshotId, indexId, snapshotShardId, recoveryState, listener); } + @Override + public RemoteStoreShardShallowCopySnapshot getRemoteStoreShallowCopyShardMetadata( + SnapshotId snapshotId, + IndexId indexId, + ShardId snapshotShardId + ) { + return in.getRemoteStoreShallowCopyShardMetadata(snapshotId, indexId, snapshotShardId); + } + @Override public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) { return in.getShardSnapshotStatus(snapshotId, indexId, shardId); diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index 045b7ad348a76..c08369b79452d 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -46,6 +46,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.snapshots.SnapshotId; @@ -304,6 +305,22 @@ void restoreShard( ActionListener listener ); + /** + * Returns Snapshot Shard Metadata for remote store interop enabled snapshot. + *
<p>
+ * The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied. + * @param snapshotId snapshot id + * @param indexId id of the index in the repository from which the restore is occurring + * @param snapshotShardId shard id (in the snapshot) + */ + default RemoteStoreShardShallowCopySnapshot getRemoteStoreShallowCopyShardMetadata( + SnapshotId snapshotId, + IndexId indexId, + ShardId snapshotShardId + ) { + throw new UnsupportedOperationException(); + } + /** * Retrieve shard snapshot status for the stored snapshot * diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index c0d6f49a5ce0d..f04bf83c2f1d1 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -2789,6 +2789,16 @@ public InputStream maybeRateLimitSnapshots(InputStream stream) { return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos); } + @Override + public RemoteStoreShardShallowCopySnapshot getRemoteStoreShallowCopyShardMetadata( + SnapshotId snapshotId, + IndexId indexId, + ShardId snapshotShardId + ) { + final BlobContainer container = shardContainer(indexId, snapshotShardId); + return loadShallowCopyShardSnapshot(container, snapshotId); + } + @Override public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) { BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(shardContainer(indexId, shardId), snapshotId); diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 7d67c6c3b45f4..9daad9112e473 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -1550,7 +1550,9 @@ private CanMatchResponse canMatch(ShardSearchRequest request, boolean checkRefre } public static boolean canMatchSearchAfter(FieldDoc searchAfter, MinAndMax minMax, FieldSortBuilder primarySortField) { - if (searchAfter != null && minMax != null && primarySortField != null) { + // Check for sort.missing == null, since in case of missing values sort queries, if segment/shard's min/max + // is out of search_after range, it still should be printed and hence we should not skip segment/shard. + if (searchAfter != null && minMax != null && primarySortField != null && primarySortField.missing() == null) { final Object searchAfterPrimary = searchAfter.fields[0]; if (primarySortField.order() == SortOrder.DESC) { if (minMax.compareMin(searchAfterPrimary) > 0) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java index f760070a9b650..e57776fd78c49 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java @@ -235,7 +235,7 @@ private static AggregatorFactories.Builder parseAggregators(XContentParser parse } } - return factories; + return factories.count() > 0 ? 
factories : null; } public static final AggregatorFactories EMPTY = new AggregatorFactories(new AggregatorFactory[0]); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java index 6b511587cc271..a55b7d6bc154e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java @@ -33,7 +33,7 @@ package org.opensearch.search.aggregations.bucket.terms; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.LongHash; +import org.opensearch.common.util.ReorganizingLongHash; import org.opensearch.common.util.LongLongHash; import org.opensearch.common.lease.Releasable; import org.opensearch.search.aggregations.CardinalityUpperBound; @@ -148,10 +148,10 @@ public long value() { * @opensearch.internal */ public static class FromSingle extends LongKeyedBucketOrds { - private final LongHash ords; + private final ReorganizingLongHash ords; public FromSingle(BigArrays bigArrays) { - ords = new LongHash(1, bigArrays); + ords = new ReorganizingLongHash(bigArrays); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java index c810ba8f38624..fccb9c3af5986 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java @@ -196,7 +196,20 @@ InternalMultiTerms buildResult(long owningBucketOrd, long otherDocCount, Interna @Override public InternalAggregation buildEmptyAggregation() { - return null; + return new InternalMultiTerms( + name, + order, + order, + bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), + metadata(), + bucketCountThresholds.getShardSize(), + showTermDocCountError, + 0, + 0, + formats, + Collections.emptyList() + ); } @Override diff --git a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java index c9a5f865d507e..92826eee5a4f4 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java @@ -8,7 +8,8 @@ package org.opensearch.search.pipeline; -import org.opensearch.OpenSearchParseException; +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchPhaseResults; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Nullable; @@ -16,17 +17,12 @@ import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchPhaseResult; -import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.Map; - -import static org.opensearch.ingest.ConfigurationUtils.TAG_KEY; -import static org.opensearch.ingest.Pipeline.DESCRIPTION_KEY; -import static org.opensearch.ingest.Pipeline.VERSION_KEY; +import 
java.util.concurrent.TimeUnit; +import java.util.function.LongSupplier; /** * Concrete representation of a search pipeline, holding multiple processors. @@ -35,6 +31,7 @@ class Pipeline { public static final String REQUEST_PROCESSORS_KEY = "request_processors"; public static final String RESPONSE_PROCESSORS_KEY = "response_processors"; + public static final String PHASE_PROCESSORS_KEY = "phase_results_processors"; private final String id; private final String description; private final Integer version; @@ -43,75 +40,28 @@ class Pipeline { // Then these can be CompoundProcessors instead of lists. private final List searchRequestProcessors; private final List searchResponseProcessors; - + private final List searchPhaseResultsProcessors; private final NamedWriteableRegistry namedWriteableRegistry; + private final LongSupplier relativeTimeSupplier; - private Pipeline( + Pipeline( String id, @Nullable String description, @Nullable Integer version, List requestProcessors, List responseProcessors, - NamedWriteableRegistry namedWriteableRegistry + List phaseResultsProcessors, + NamedWriteableRegistry namedWriteableRegistry, + LongSupplier relativeTimeSupplier ) { this.id = id; this.description = description; this.version = version; - this.searchRequestProcessors = requestProcessors; - this.searchResponseProcessors = responseProcessors; + this.searchRequestProcessors = Collections.unmodifiableList(requestProcessors); + this.searchResponseProcessors = Collections.unmodifiableList(responseProcessors); + this.searchPhaseResultsProcessors = Collections.unmodifiableList(phaseResultsProcessors); this.namedWriteableRegistry = namedWriteableRegistry; - } - - static Pipeline create( - String id, - Map config, - Map> requestProcessorFactories, - Map> responseProcessorFactories, - NamedWriteableRegistry namedWriteableRegistry - ) throws Exception { - String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); - Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); - List> requestProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, REQUEST_PROCESSORS_KEY); - List requestProcessors = readProcessors(requestProcessorFactories, requestProcessorConfigs); - List> responseProcessorConfigs = ConfigurationUtils.readOptionalList( - null, - null, - config, - RESPONSE_PROCESSORS_KEY - ); - List responseProcessors = readProcessors(responseProcessorFactories, responseProcessorConfigs); - if (config.isEmpty() == false) { - throw new OpenSearchParseException( - "pipeline [" - + id - + "] doesn't support one or more provided configuration parameters " - + Arrays.toString(config.keySet().toArray()) - ); - } - return new Pipeline(id, description, version, requestProcessors, responseProcessors, namedWriteableRegistry); - } - - private static List readProcessors( - Map> processorFactories, - List> requestProcessorConfigs - ) throws Exception { - List processors = new ArrayList<>(); - if (requestProcessorConfigs == null) { - return processors; - } - for (Map processorConfigWithKey : requestProcessorConfigs) { - for (Map.Entry entry : processorConfigWithKey.entrySet()) { - String type = entry.getKey(); - if (!processorFactories.containsKey(type)) { - throw new IllegalArgumentException("Invalid processor type " + type); - } - Map config = (Map) entry.getValue(); - String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY); - String description = ConfigurationUtils.readOptionalStringProperty(null, tag, 
config, DESCRIPTION_KEY); - processors.add(processorFactories.get(type).create(processorFactories, tag, description, config)); - } - } - return Collections.unmodifiableList(processors); + this.relativeTimeSupplier = relativeTimeSupplier; } String getId() { @@ -134,30 +84,115 @@ List getSearchResponseProcessors() { return searchResponseProcessors; } - SearchRequest transformRequest(SearchRequest request) throws Exception { + List getSearchPhaseResultsProcessors() { + return searchPhaseResultsProcessors; + } + + protected void beforeTransformRequest() {} + + protected void afterTransformRequest(long timeInNanos) {} + + protected void onTransformRequestFailure() {} + + protected void beforeRequestProcessor(Processor processor) {} + + protected void afterRequestProcessor(Processor processor, long timeInNanos) {} + + protected void onRequestProcessorFailed(Processor processor) {} + + protected void beforeTransformResponse() {} + + protected void afterTransformResponse(long timeInNanos) {} + + protected void onTransformResponseFailure() {} + + protected void beforeResponseProcessor(Processor processor) {} + + protected void afterResponseProcessor(Processor processor, long timeInNanos) {} + + protected void onResponseProcessorFailed(Processor processor) {} + + SearchRequest transformRequest(SearchRequest request) throws SearchPipelineProcessingException { if (searchRequestProcessors.isEmpty() == false) { - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - request.writeTo(bytesStreamOutput); - try (StreamInput in = bytesStreamOutput.bytes().streamInput()) { - try (StreamInput input = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry)) { - request = new SearchRequest(input); + long pipelineStart = relativeTimeSupplier.getAsLong(); + beforeTransformRequest(); + try { + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + request.writeTo(bytesStreamOutput); + try (StreamInput in = bytesStreamOutput.bytes().streamInput()) { + try (StreamInput input = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry)) { + request = new SearchRequest(input); + } } } - } - for (SearchRequestProcessor searchRequestProcessor : searchRequestProcessors) { - request = searchRequestProcessor.processRequest(request); + for (SearchRequestProcessor processor : searchRequestProcessors) { + beforeRequestProcessor(processor); + long start = relativeTimeSupplier.getAsLong(); + try { + request = processor.processRequest(request); + } catch (Exception e) { + onRequestProcessorFailed(processor); + throw e; + } finally { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterRequestProcessor(processor, took); + } + } + } catch (Exception e) { + onTransformRequestFailure(); + throw new SearchPipelineProcessingException(e); + } finally { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); + afterTransformRequest(took); } } return request; } SearchResponse transformResponse(SearchRequest request, SearchResponse response) throws SearchPipelineProcessingException { + if (searchResponseProcessors.isEmpty() == false) { + long pipelineStart = relativeTimeSupplier.getAsLong(); + beforeTransformResponse(); + try { + for (SearchResponseProcessor processor : searchResponseProcessors) { + beforeResponseProcessor(processor); + long start = relativeTimeSupplier.getAsLong(); + try { + response = processor.processResponse(request, response); + } catch (Exception e) { + onResponseProcessorFailed(processor); + 
throw e; + } finally { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterResponseProcessor(processor, took); + } + } + } catch (Exception e) { + onTransformResponseFailure(); + throw new SearchPipelineProcessingException(e); + } finally { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); + afterTransformResponse(took); + } + } + return response; + } + + void runSearchPhaseResultsTransformer( + SearchPhaseResults searchPhaseResult, + SearchPhaseContext context, + String currentPhase, + String nextPhase + ) throws SearchPipelineProcessingException { + try { - for (SearchResponseProcessor responseProcessor : searchResponseProcessors) { - response = responseProcessor.processResponse(request, response); + for (SearchPhaseResultsProcessor searchPhaseResultsProcessor : searchPhaseResultsProcessors) { + if (currentPhase.equals(searchPhaseResultsProcessor.getBeforePhase().getName()) + && nextPhase.equals(searchPhaseResultsProcessor.getAfterPhase().getName())) { + searchPhaseResultsProcessor.process(searchPhaseResult, context); + } } - return response; - } catch (Exception e) { + } catch (RuntimeException e) { throw new SearchPipelineProcessingException(e); } } @@ -168,6 +203,8 @@ SearchResponse transformResponse(SearchRequest request, SearchResponse response) 0, Collections.emptyList(), Collections.emptyList(), - null + Collections.emptyList(), + null, + () -> 0L ); } diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java new file mode 100644 index 0000000000000..612e979e56070 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java @@ -0,0 +1,249 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.metrics.OperationMetrics; +import org.opensearch.ingest.ConfigurationUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.LongSupplier; + +import static org.opensearch.ingest.ConfigurationUtils.TAG_KEY; +import static org.opensearch.ingest.Pipeline.DESCRIPTION_KEY; +import static org.opensearch.ingest.Pipeline.VERSION_KEY; + +/** + * Specialization of {@link Pipeline} that adds metrics to track executions of the pipeline and individual processors. 
+ */
+class PipelineWithMetrics extends Pipeline {
+
+    private final OperationMetrics totalRequestMetrics;
+    private final OperationMetrics totalResponseMetrics;
+    private final OperationMetrics pipelineRequestMetrics = new OperationMetrics();
+    private final OperationMetrics pipelineResponseMetrics = new OperationMetrics();
+    private final Map<String, OperationMetrics> requestProcessorMetrics = new HashMap<>();
+    private final Map<String, OperationMetrics> responseProcessorMetrics = new HashMap<>();
+
+    PipelineWithMetrics(
+        String id,
+        String description,
+        Integer version,
+        List<SearchRequestProcessor> requestProcessors,
+        List<SearchResponseProcessor> responseProcessors,
+        List<SearchPhaseResultsProcessor> phaseResultsProcessors,
+        NamedWriteableRegistry namedWriteableRegistry,
+        OperationMetrics totalRequestMetrics,
+        OperationMetrics totalResponseMetrics,
+        LongSupplier relativeTimeSupplier
+    ) {
+        super(
+            id,
+            description,
+            version,
+            requestProcessors,
+            responseProcessors,
+            phaseResultsProcessors,
+            namedWriteableRegistry,
+            relativeTimeSupplier
+        );
+        this.totalRequestMetrics = totalRequestMetrics;
+        this.totalResponseMetrics = totalResponseMetrics;
+        for (Processor requestProcessor : getSearchRequestProcessors()) {
+            requestProcessorMetrics.putIfAbsent(getProcessorKey(requestProcessor), new OperationMetrics());
+        }
+        for (Processor responseProcessor : getSearchResponseProcessors()) {
+            responseProcessorMetrics.putIfAbsent(getProcessorKey(responseProcessor), new OperationMetrics());
+        }
+    }
+
+    static PipelineWithMetrics create(
+        String id,
+        Map<String, Object> config,
+        Map<String, Processor.Factory<SearchRequestProcessor>> requestProcessorFactories,
+        Map<String, Processor.Factory<SearchResponseProcessor>> responseProcessorFactories,
+        Map<String, Processor.Factory<SearchPhaseResultsProcessor>> phaseResultsProcessorFactories,
+        NamedWriteableRegistry namedWriteableRegistry,
+        OperationMetrics totalRequestProcessingMetrics,
+        OperationMetrics totalResponseProcessingMetrics
+    ) throws Exception {
+        String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY);
+        Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null);
+        List<Map<String, Object>> requestProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, REQUEST_PROCESSORS_KEY);
+        List<SearchRequestProcessor> requestProcessors = readProcessors(requestProcessorFactories, requestProcessorConfigs);
+        List<Map<String, Object>> responseProcessorConfigs = ConfigurationUtils.readOptionalList(
+            null,
+            null,
+            config,
+            RESPONSE_PROCESSORS_KEY
+        );
+        List<SearchResponseProcessor> responseProcessors = readProcessors(responseProcessorFactories, responseProcessorConfigs);
+        List<Map<String, Object>> phaseResultsProcessorConfigs = ConfigurationUtils.readOptionalList(
+            null,
+            null,
+            config,
+            PHASE_PROCESSORS_KEY
+        );
+        List<SearchPhaseResultsProcessor> phaseResultsProcessors = readProcessors(
+            phaseResultsProcessorFactories,
+            phaseResultsProcessorConfigs
+        );
+        if (config.isEmpty() == false) {
+            throw new OpenSearchParseException(
+                "pipeline ["
+                    + id
+                    + "] doesn't support one or more provided configuration parameters "
+                    + Arrays.toString(config.keySet().toArray())
+            );
+        }
+        return new PipelineWithMetrics(
+            id,
+            description,
+            version,
+            requestProcessors,
+            responseProcessors,
+            phaseResultsProcessors,
+            namedWriteableRegistry,
+            totalRequestProcessingMetrics,
+            totalResponseProcessingMetrics,
+            System::nanoTime
+        );
+
+    }
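To make the configuration contract concrete: create() consumes the parsed JSON body of a put-search-pipeline request as a mutable map, and the ConfigurationUtils readers remove each key they consume, which is what lets the config.isEmpty() == false check above reject unknown parameters. A hypothetical invocation (the "filter_query" processor type and the factory/registry variables are illustrative, not defined in this diff):

    Map<String, Object> config = new HashMap<>();
    config.put("description", "rewrite incoming queries");
    config.put("request_processors", new ArrayList<>(List.of(
        new HashMap<>(Map.of("filter_query", new HashMap<>(Map.of("tag", "demo"))))
    )));

    PipelineWithMetrics pipeline = PipelineWithMetrics.create(
        "demo-pipeline",
        config,                            // mutated in place; leftover keys raise OpenSearchParseException
        requestProcessorFactories,
        responseProcessorFactories,
        phaseResultsProcessorFactories,
        namedWriteableRegistry,
        new OperationMetrics(),            // normally the service-wide request totals
        new OperationMetrics()
    );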
+    private static <T extends Processor> List<T> readProcessors(
+        Map<String, Processor.Factory<T>> processorFactories,
+        List<Map<String, Object>> requestProcessorConfigs
+    ) throws Exception {
+        List<T> processors = new ArrayList<>();
+        if (requestProcessorConfigs == null) {
+            return processors;
+        }
+        for (Map<String, Object> processorConfigWithKey : requestProcessorConfigs) {
+            for (Map.Entry<String, Object> entry : processorConfigWithKey.entrySet()) {
+                String type = entry.getKey();
+                if (!processorFactories.containsKey(type)) {
+                    throw new IllegalArgumentException("Invalid processor type " + type);
+                }
+                Map<String, Object> config = (Map<String, Object>) entry.getValue();
+                String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY);
+                String description = ConfigurationUtils.readOptionalStringProperty(null, tag, config, DESCRIPTION_KEY);
+                processors.add(processorFactories.get(type).create(processorFactories, tag, description, config));
+            }
+        }
+        return Collections.unmodifiableList(processors);
+    }
+
+    @Override
+    protected void beforeTransformRequest() {
+        super.beforeTransformRequest();
+        totalRequestMetrics.before();
+        pipelineRequestMetrics.before();
+    }
+
+    @Override
+    protected void afterTransformRequest(long timeInNanos) {
+        super.afterTransformRequest(timeInNanos);
+        totalRequestMetrics.after(timeInNanos);
+        pipelineRequestMetrics.after(timeInNanos);
+    }
+
+    @Override
+    protected void onTransformRequestFailure() {
+        super.onTransformRequestFailure();
+        totalRequestMetrics.failed();
+        pipelineRequestMetrics.failed();
+    }
+
+    protected void beforeRequestProcessor(Processor processor) {
+        requestProcessorMetrics.get(getProcessorKey(processor)).before();
+    }
+
+    protected void afterRequestProcessor(Processor processor, long timeInNanos) {
+        requestProcessorMetrics.get(getProcessorKey(processor)).after(timeInNanos);
+    }
+
+    protected void onRequestProcessorFailed(Processor processor) {
+        requestProcessorMetrics.get(getProcessorKey(processor)).failed();
+    }
+
+    protected void beforeTransformResponse() {
+        super.beforeTransformResponse();
+        totalResponseMetrics.before();
+        pipelineResponseMetrics.before();
+    }
+
+    protected void afterTransformResponse(long timeInNanos) {
+        super.afterTransformResponse(timeInNanos);
+        totalResponseMetrics.after(timeInNanos);
+        pipelineResponseMetrics.after(timeInNanos);
+    }
+
+    protected void onTransformResponseFailure() {
+        super.onTransformResponseFailure();
+        totalResponseMetrics.failed();
+        pipelineResponseMetrics.failed();
+    }
+
+    protected void beforeResponseProcessor(Processor processor) {
+        responseProcessorMetrics.get(getProcessorKey(processor)).before();
+    }
+
+    protected void afterResponseProcessor(Processor processor, long timeInNanos) {
+        responseProcessorMetrics.get(getProcessorKey(processor)).after(timeInNanos);
+    }
+
+    protected void onResponseProcessorFailed(Processor processor) {
+        responseProcessorMetrics.get(getProcessorKey(processor)).failed();
+    }
+
+    void copyMetrics(PipelineWithMetrics oldPipeline) {
+        pipelineRequestMetrics.add(oldPipeline.pipelineRequestMetrics);
+        pipelineResponseMetrics.add(oldPipeline.pipelineResponseMetrics);
+        copyProcessorMetrics(requestProcessorMetrics, oldPipeline.requestProcessorMetrics);
+        copyProcessorMetrics(responseProcessorMetrics, oldPipeline.responseProcessorMetrics);
+    }
+
+    private static void copyProcessorMetrics(
+        Map<String, OperationMetrics> newProcessorMetrics,
+        Map<String, OperationMetrics> oldProcessorMetrics
+    ) {
+        for (Map.Entry<String, OperationMetrics> oldProcessorMetric : oldProcessorMetrics.entrySet()) {
+            if (newProcessorMetrics.containsKey(oldProcessorMetric.getKey())) {
+                newProcessorMetrics.get(oldProcessorMetric.getKey()).add(oldProcessorMetric.getValue());
+            }
+        }
+    }
+
+    private static String getProcessorKey(Processor processor) {
+        String key = processor.getType();
+        if (processor.getTag() != null) {
+            return key + ":" + processor.getTag();
+        }
+        return key;
+    }
+
+    void populateStats(SearchPipelineStats.Builder statsBuilder) {
+        statsBuilder.addPipelineStats(getId(), pipelineRequestMetrics, pipelineResponseMetrics);
+        for (Processor processor : getSearchRequestProcessors()) {
+            String key = getProcessorKey(processor);
+            statsBuilder.addRequestProcessorStats(getId(), key, processor.getType(), requestProcessorMetrics.get(key));
+        }
+        for (Processor processor : getSearchResponseProcessors()) {
+            String key = getProcessorKey(processor);
+            statsBuilder.addResponseProcessorStats(getId(), key, processor.getType(), responseProcessorMetrics.get(key));
+        }
+    }
+}
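For a sense of what these per-processor metrics wrap, a minimal request processor against the contract visible in this diff (processRequest, getType and getTag are confirmed by the code above; getDescription is assumed to round out the Processor interface):

    class ClampSizeProcessor implements SearchRequestProcessor {
        // keyed as "clamp_size:demo" in requestProcessorMetrics by getProcessorKey above
        @Override
        public SearchRequest processRequest(SearchRequest request) {
            if (request.source() != null && request.source().size() > 100) {
                request.source().size(100); // an exception thrown here would be counted via failed()
            }
            return request;
        }

        @Override
        public String getType() {
            return "clamp_size";
        }

        @Override
        public String getTag() {
            return "demo";
        }

        @Override
        public String getDescription() {
            return "Caps the requested page size at 100";
        }
    }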
diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java
index 0cfff013f4021..5a7539808c127 100644
--- a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java
+++ b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java
@@ -8,29 +8,36 @@
 package org.opensearch.search.pipeline;

+import org.opensearch.action.search.SearchPhaseContext;
+import org.opensearch.action.search.SearchPhaseResults;
 import org.opensearch.action.search.SearchRequest;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.search.SearchPhaseResult;

 /**
  * Groups a search pipeline based on a request and the request after being transformed by the pipeline.
  *
  * @opensearch.internal
  */
-public final class PipelinedRequest {
+public final class PipelinedRequest extends SearchRequest {
     private final Pipeline pipeline;
-    private final SearchRequest transformedRequest;

     PipelinedRequest(Pipeline pipeline, SearchRequest transformedRequest) {
+        super(transformedRequest);
         this.pipeline = pipeline;
-        this.transformedRequest = transformedRequest;
     }

     public SearchResponse transformResponse(SearchResponse response) {
-        return pipeline.transformResponse(transformedRequest, response);
+        return pipeline.transformResponse(this, response);
     }

-    public SearchRequest transformedRequest() {
-        return transformedRequest;
+    public <Result extends SearchPhaseResult> void transformSearchPhaseResults(
+        final SearchPhaseResults<Result> searchPhaseResult,
+        final SearchPhaseContext searchPhaseContext,
+        final String currentPhase,
+        final String nextPhase
+    ) {
+        pipeline.runSearchPhaseResultsTransformer(searchPhaseResult, searchPhaseContext, currentPhase, nextPhase);
     }

     // Visible for testing
diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java
new file mode 100644
index 0000000000000..772dc8758bace
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java
@@ -0,0 +1,47 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.pipeline;
+
+import org.opensearch.action.search.SearchPhaseContext;
+import org.opensearch.action.search.SearchPhaseName;
+import org.opensearch.action.search.SearchPhaseResults;
+import org.opensearch.search.SearchPhaseResult;
+import org.opensearch.search.internal.SearchContext;
+
+/**
+ * Creates a processor that runs between Phases of the Search.
+ * @opensearch.api
+ */
+public interface SearchPhaseResultsProcessor extends Processor {
+
+    /**
+     * Processes the {@link SearchPhaseResults} obtained from a SearchPhase which will be returned to next
+     * SearchPhase.
+ * @param searchPhaseResult {@link SearchPhaseResults} + * @param searchPhaseContext {@link SearchContext} + * @param {@link SearchPhaseResult} + */ + void process( + final SearchPhaseResults searchPhaseResult, + final SearchPhaseContext searchPhaseContext + ); + + /** + * The phase which should have run before, this processor can start executing. + * @return {@link SearchPhaseName} + */ + SearchPhaseName getBeforePhase(); + + /** + * The phase which should run after, this processor execution. + * @return {@link SearchPhaseName} + */ + SearchPhaseName getAfterPhase(); + +} diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java index 87c09bd971284..70dc8546a077f 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java @@ -30,6 +30,7 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.metrics.OperationMetrics; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -72,6 +73,7 @@ public class SearchPipelineService implements ClusterStateApplier, ReportingServ private final ScriptService scriptService; private final Map> requestProcessorFactories; private final Map> responseProcessorFactories; + private final Map> phaseInjectorProcessorFactories; private volatile Map pipelines = Collections.emptyMap(); private final ThreadPool threadPool; private final List> searchPipelineClusterStateListeners = new CopyOnWriteArrayList<>(); @@ -80,6 +82,9 @@ public class SearchPipelineService implements ClusterStateApplier, ReportingServ private final NamedWriteableRegistry namedWriteableRegistry; private volatile ClusterState state; + private final OperationMetrics totalRequestProcessingMetrics = new OperationMetrics(); + private final OperationMetrics totalResponseProcessingMetrics = new OperationMetrics(); + private final boolean isEnabled; public SearchPipelineService( @@ -112,6 +117,10 @@ public SearchPipelineService( ); this.requestProcessorFactories = processorFactories(searchPipelinePlugins, p -> p.getRequestProcessors(parameters)); this.responseProcessorFactories = processorFactories(searchPipelinePlugins, p -> p.getResponseProcessors(parameters)); + this.phaseInjectorProcessorFactories = processorFactories( + searchPipelinePlugins, + p -> p.getSearchPhaseResultsProcessors(parameters) + ); putPipelineTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.PUT_SEARCH_PIPELINE_KEY, true); deletePipelineTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.DELETE_SEARCH_PIPELINE_KEY, true); this.isEnabled = isEnabled; @@ -172,26 +181,27 @@ void innerUpdatePipelines(SearchPipelineMetadata newSearchPipelineMetadata) { newPipelines = new HashMap<>(existingPipelines); } try { - Pipeline newPipeline = Pipeline.create( + PipelineWithMetrics newPipeline = PipelineWithMetrics.create( newConfiguration.getId(), newConfiguration.getConfigAsMap(), requestProcessorFactories, responseProcessorFactories, - namedWriteableRegistry + phaseInjectorProcessorFactories, + namedWriteableRegistry, + totalRequestProcessingMetrics, + totalResponseProcessingMetrics ); newPipelines.put(newConfiguration.getId(), 
new PipelineHolder(newConfiguration, newPipeline)); - if (previous == null) { - continue; + if (previous != null) { + newPipeline.copyMetrics(previous.pipeline); } - // TODO -- once we add in pipeline metrics (like in ingest pipelines), we will need to deep-copy - // the old pipeline's metrics into the new pipeline. } catch (Exception e) { OpenSearchParseException parseException = new OpenSearchParseException( "Error updating pipeline with id [" + newConfiguration.getId() + "]", e ); - // TODO -- replace pipeline with one that throws an exception when we try to use it + // TODO -- replace pipeline with one that throws this exception when we try to use it if (exceptions == null) { exceptions = new ArrayList<>(); } @@ -271,12 +281,15 @@ void validatePipeline(Map searchPipelineInfos throw new IllegalStateException("Search pipeline info is empty"); } Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); - Pipeline pipeline = Pipeline.create( + Pipeline pipeline = PipelineWithMetrics.create( request.getId(), pipelineConfig, requestProcessorFactories, responseProcessorFactories, - namedWriteableRegistry + phaseInjectorProcessorFactories, + namedWriteableRegistry, + new OperationMetrics(), // Use ephemeral metrics for validation + new OperationMetrics() ); List exceptions = new ArrayList<>(); for (SearchRequestProcessor processor : pipeline.getSearchRequestProcessors()) { @@ -353,7 +366,7 @@ static ClusterState innerDelete(DeleteSearchPipelineRequest request, ClusterStat return newState.build(); } - public PipelinedRequest resolvePipeline(SearchRequest searchRequest) throws Exception { + public PipelinedRequest resolvePipeline(SearchRequest searchRequest) { Pipeline pipeline = Pipeline.NO_OP_PIPELINE; if (isEnabled == false) { @@ -367,12 +380,15 @@ public PipelinedRequest resolvePipeline(SearchRequest searchRequest) throws Exce ); } try { - pipeline = Pipeline.create( + pipeline = PipelineWithMetrics.create( AD_HOC_PIPELINE_ID, searchRequest.source().searchPipelineSource(), requestProcessorFactories, responseProcessorFactories, - namedWriteableRegistry + phaseInjectorProcessorFactories, + namedWriteableRegistry, + totalRequestProcessingMetrics, + totalResponseProcessingMetrics ); } catch (Exception e) { throw new SearchPipelineProcessingException(e); @@ -400,12 +416,8 @@ public PipelinedRequest resolvePipeline(SearchRequest searchRequest) throws Exce pipeline = pipelineHolder.pipeline; } } - try { - SearchRequest transformedRequest = pipeline.transformRequest(searchRequest); - return new PipelinedRequest(pipeline, transformedRequest); - } catch (Exception e) { - throw new SearchPipelineProcessingException(e); - } + SearchRequest transformedRequest = pipeline.transformRequest(searchRequest); + return new PipelinedRequest(pipeline, transformedRequest); } Map> getRequestProcessorFactories() { @@ -431,6 +443,16 @@ public SearchPipelineInfo info() { ); } + public SearchPipelineStats stats() { + SearchPipelineStats.Builder builder = new SearchPipelineStats.Builder(); + builder.withTotalStats(totalRequestProcessingMetrics, totalResponseProcessingMetrics); + for (PipelineHolder pipelineHolder : pipelines.values()) { + PipelineWithMetrics pipeline = pipelineHolder.pipeline; + pipeline.populateStats(builder); + } + return builder.build(); + } + public static List getPipelines(ClusterState clusterState, String... 
ids) {
        SearchPipelineMetadata metadata = clusterState.getMetadata().custom(SearchPipelineMetadata.TYPE);
        return innerGetPipelines(metadata, ids);
@@ -474,9 +496,9 @@ Map<String, PipelineHolder> getPipelines() {
    static class PipelineHolder {

        final PipelineConfiguration configuration;
-        final Pipeline pipeline;
+        final PipelineWithMetrics pipeline;

-        PipelineHolder(PipelineConfiguration configuration, Pipeline pipeline) {
+        PipelineHolder(PipelineConfiguration configuration, PipelineWithMetrics pipeline) {
            this.configuration = Objects.requireNonNull(configuration);
            this.pipeline = Objects.requireNonNull(pipeline);
        }
diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineStats.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineStats.java
new file mode 100644
index 0000000000000..4261bfe99160a
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineStats.java
@@ -0,0 +1,367 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.pipeline;
+
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.common.io.stream.Writeable;
+import org.opensearch.common.metrics.OperationMetrics;
+import org.opensearch.common.metrics.OperationStats;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.TreeMap;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.unmodifiableList;
+import static java.util.Collections.unmodifiableMap;
+
+/**
+ * Serializable, immutable search pipeline statistics to be returned via stats APIs.
+ *
+ * @opensearch.internal
+ */
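Before the implementation, the response layout this class renders through toXContent below, sketched with placeholder pipeline and processor names (the leaf objects are OperationStats fragments):

    // "search_pipeline" : {
    //   "total_request"  : { <OperationStats fields> },
    //   "total_response" : { <OperationStats fields> },
    //   "pipelines" : {
    //     "<pipeline_id>" : {
    //       "request"  : { ... },
    //       "response" : { ... },
    //       "request_processors"  : [ { "<processor_name>" : { "type" : "<type>", "stats" : { ... } } } ],
    //       "response_processors" : [ ... ]
    //     }
    //   }
    // }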
+public class SearchPipelineStats implements Writeable, ToXContentFragment {
+
+    private final OperationStats totalRequestStats;
+    private final OperationStats totalResponseStats;
+    private final List<PerPipelineStats> perPipelineStats;
+    private final Map<String, PipelineDetailStats> perPipelineProcessorStats;
+
+    public SearchPipelineStats(
+        OperationStats totalRequestStats,
+        OperationStats totalResponseStats,
+        List<PerPipelineStats> perPipelineStats,
+        Map<String, PipelineDetailStats> perPipelineProcessorStats
+    ) {
+        this.totalRequestStats = totalRequestStats;
+        this.totalResponseStats = totalResponseStats;
+        this.perPipelineStats = perPipelineStats;
+        this.perPipelineProcessorStats = perPipelineProcessorStats;
+    }
+
+    public SearchPipelineStats(StreamInput in) throws IOException {
+        this.totalRequestStats = new OperationStats(in);
+        this.totalResponseStats = new OperationStats(in);
+        int size = in.readVInt();
+        List<PerPipelineStats> perPipelineStats = new ArrayList<>(size);
+        Map<String, PipelineDetailStats> pipelineDetailStatsMap = new TreeMap<>();
+        for (int i = 0; i < size; i++) {
+            String pipelineId = in.readString();
+            OperationStats pipelineRequestStats = new OperationStats(in);
+            OperationStats pipelineResponseStats = new OperationStats(in);
+            perPipelineStats.add(new PerPipelineStats(pipelineId, pipelineRequestStats, pipelineResponseStats));
+            int numRequestProcessors = in.readVInt();
+            List<ProcessorStats> requestProcessorStats = new ArrayList<>(numRequestProcessors);
+            for (int j = 0; j < numRequestProcessors; j++) {
+                String processorName = in.readString();
+                String processorType = in.readString();
+                OperationStats processorStats = new OperationStats(in);
+                requestProcessorStats.add(new ProcessorStats(processorName, processorType, processorStats));
+            }
+            int numResponseProcessors = in.readVInt();
+            List<ProcessorStats> responseProcessorStats = new ArrayList<>(numResponseProcessors);
+            for (int j = 0; j < numResponseProcessors; j++) {
+                String processorName = in.readString();
+                String processorType = in.readString();
+                OperationStats processorStats = new OperationStats(in);
+                responseProcessorStats.add(new ProcessorStats(processorName, processorType, processorStats));
+            }
+            pipelineDetailStatsMap.put(
+                pipelineId,
+                new PipelineDetailStats(unmodifiableList(requestProcessorStats), unmodifiableList(responseProcessorStats))
+            );
+        }
+        this.perPipelineStats = unmodifiableList(perPipelineStats);
+        this.perPipelineProcessorStats = unmodifiableMap(pipelineDetailStatsMap);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject("search_pipeline");
+        builder.startObject("total_request");
+        totalRequestStats.toXContent(builder, params);
+        builder.endObject();
+        builder.startObject("total_response");
+        totalResponseStats.toXContent(builder, params);
+        builder.endObject();
+        builder.startObject("pipelines");
+        for (PerPipelineStats pipelineStat : perPipelineStats) {
+            builder.startObject(pipelineStat.pipelineId);
+            builder.startObject("request");
+            pipelineStat.requestStats.toXContent(builder, params);
+            builder.endObject();
+            builder.startObject("response");
+            pipelineStat.responseStats.toXContent(builder, params);
+            builder.endObject();
+
+            PipelineDetailStats pipelineDetailStats = perPipelineProcessorStats.get(pipelineStat.pipelineId);
+            builder.startArray("request_processors");
+            for (ProcessorStats processorStats : pipelineDetailStats.requestProcessorStats) {
+                builder.startObject();
+                processorStats.toXContent(builder, params);
+                builder.endObject();
+            }
+            builder.endArray();
+            builder.startArray("response_processors");
+            for
(ProcessorStats processorStats : pipelineDetailStats.responseProcessorStats) { + builder.startObject(); + processorStats.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + totalRequestStats.writeTo(out); + totalResponseStats.writeTo(out); + out.writeVInt(perPipelineStats.size()); + for (PerPipelineStats pipelineStat : perPipelineStats) { + out.writeString(pipelineStat.pipelineId); + pipelineStat.requestStats.writeTo(out); + pipelineStat.responseStats.writeTo(out); + PipelineDetailStats pipelineDetailStats = perPipelineProcessorStats.get(pipelineStat.pipelineId); + out.writeVInt(pipelineDetailStats.requestProcessorStats.size()); + for (ProcessorStats processorStats : pipelineDetailStats.requestProcessorStats) { + out.writeString(processorStats.processorName); + out.writeString(processorStats.processorType); + processorStats.stats.writeTo(out); + } + out.writeVInt(pipelineDetailStats.responseProcessorStats.size()); + for (ProcessorStats processorStats : pipelineDetailStats.responseProcessorStats) { + out.writeString(processorStats.processorName); + out.writeString(processorStats.processorType); + processorStats.stats.writeTo(out); + } + } + } + + static class Builder { + private OperationStats totalRequestStats; + private OperationStats totalResponseStats; + private final List<PerPipelineStats> perPipelineStats = new ArrayList<>(); + private final Map<String, List<ProcessorStats>> requestProcessorStatsPerPipeline = new HashMap<>(); + private final Map<String, List<ProcessorStats>> responseProcessorStatsPerPipeline = new HashMap<>(); + + Builder withTotalStats(OperationMetrics totalRequestMetrics, OperationMetrics totalResponseMetrics) { + this.totalRequestStats = totalRequestMetrics.createStats(); + this.totalResponseStats = totalResponseMetrics.createStats(); + return this; + } + + Builder addPipelineStats(String pipelineId, OperationMetrics pipelineRequestMetrics, OperationMetrics pipelineResponseMetrics) { + this.perPipelineStats.add( + new PerPipelineStats(pipelineId, pipelineRequestMetrics.createStats(), pipelineResponseMetrics.createStats()) + ); + return this; + } + + Builder addRequestProcessorStats(String pipelineId, String processorName, String processorType, OperationMetrics processorMetrics) { + this.requestProcessorStatsPerPipeline.computeIfAbsent(pipelineId, k -> new ArrayList<>()) + .add(new ProcessorStats(processorName, processorType, processorMetrics.createStats())); + return this; + } + + Builder addResponseProcessorStats( + String pipelineId, + String processorName, + String processorType, + OperationMetrics processorMetrics + ) { + this.responseProcessorStatsPerPipeline.computeIfAbsent(pipelineId, k -> new ArrayList<>()) + .add(new ProcessorStats(processorName, processorType, processorMetrics.createStats())); + return this; + } + + SearchPipelineStats build() { + Map<String, PipelineDetailStats> pipelineDetailStatsMap = new TreeMap<>(); + for (PerPipelineStats pipelineStat : perPipelineStats) { + List<ProcessorStats> requestProcessorStats = requestProcessorStatsPerPipeline.getOrDefault( + pipelineStat.pipelineId, + emptyList() + ); + List<ProcessorStats> responseProcessorStats = responseProcessorStatsPerPipeline.getOrDefault( + pipelineStat.pipelineId, + emptyList() + ); + PipelineDetailStats pipelineDetailStats = new PipelineDetailStats( + unmodifiableList(requestProcessorStats), + unmodifiableList(responseProcessorStats) + ); + pipelineDetailStatsMap.put(pipelineStat.pipelineId, pipelineDetailStats); + } + return new SearchPipelineStats( + totalRequestStats, + totalResponseStats, + unmodifiableList(perPipelineStats), + unmodifiableMap(pipelineDetailStatsMap) + ); + } + }
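For orientation, here is a minimal sketch of how the Builder above is meant to be driven. The metric instances and the pipeline/processor names are hypothetical; OperationMetrics, createStats(), and the Builder methods come from this diff, and the sketch assumes OperationMetrics has a public no-arg constructor:

    // Hypothetical assembly of an immutable stats snapshot from live metrics.
    OperationMetrics totalRequestMetrics = new OperationMetrics();
    OperationMetrics totalResponseMetrics = new OperationMetrics();
    OperationMetrics pipelineRequestMetrics = new OperationMetrics();
    OperationMetrics pipelineResponseMetrics = new OperationMetrics();
    OperationMetrics processorMetrics = new OperationMetrics();

    SearchPipelineStats stats = new SearchPipelineStats.Builder().withTotalStats(totalRequestMetrics, totalResponseMetrics)
        .addPipelineStats("my_pipeline", pipelineRequestMetrics, pipelineResponseMetrics)
        .addRequestProcessorStats("my_pipeline", "filter_query:my_tag", "filter_query", processorMetrics)
        .build();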
+ + static class PerPipelineStats { + private final String pipelineId; + private final OperationStats requestStats; + private final OperationStats responseStats; + + public PerPipelineStats(String pipelineId, OperationStats requestStats, OperationStats responseStats) { + this.pipelineId = pipelineId; + this.requestStats = requestStats; + this.responseStats = responseStats; + } + + public String getPipelineId() { + return pipelineId; + } + + public OperationStats getRequestStats() { + return requestStats; + } + + public OperationStats getResponseStats() { + return responseStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PerPipelineStats that = (PerPipelineStats) o; + return pipelineId.equals(that.pipelineId) && requestStats.equals(that.requestStats) && responseStats.equals(that.responseStats); + } + + @Override + public int hashCode() { + return Objects.hash(pipelineId, requestStats, responseStats); + } + } + + static class PipelineDetailStats { + private final List<ProcessorStats> requestProcessorStats; + private final List<ProcessorStats> responseProcessorStats; + + public PipelineDetailStats(List<ProcessorStats> requestProcessorStats, List<ProcessorStats> responseProcessorStats) { + this.requestProcessorStats = requestProcessorStats; + this.responseProcessorStats = responseProcessorStats; + } + + public List<ProcessorStats> requestProcessorStats() { + return requestProcessorStats; + } + + public List<ProcessorStats> responseProcessorStats() { + return responseProcessorStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PipelineDetailStats that = (PipelineDetailStats) o; + return requestProcessorStats.equals(that.requestProcessorStats) && responseProcessorStats.equals(that.responseProcessorStats); + } + + @Override + public int hashCode() { + return Objects.hash(requestProcessorStats, responseProcessorStats); + } + } + + static class ProcessorStats implements ToXContentFragment { + private final String processorName; // type:tag + private final String processorType; + private final OperationStats stats; + + public ProcessorStats(String processorName, String processorType, OperationStats stats) { + this.processorName = processorName; + this.processorType = processorType; + this.stats = stats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ProcessorStats that = (ProcessorStats) o; + return processorName.equals(that.processorName) && processorType.equals(that.processorType) && stats.equals(that.stats); + } + + @Override + public int hashCode() { + return Objects.hash(processorName, processorType, stats); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(processorName); + builder.field("type", processorType); + builder.startObject("stats"); + stats.toXContent(builder, params); + builder.endObject(); + builder.endObject(); + return builder; + } + + String getProcessorName() { + return processorName; + } + + String getProcessorType() { + return processorType; + } + + OperationStats getStats() { + return stats; + } + } + + OperationStats getTotalRequestStats() { + return totalRequestStats; + } + + OperationStats getTotalResponseStats() {
+ return totalResponseStats; + } + + List<PerPipelineStats> getPipelineStats() { + return perPipelineStats; + } + + Map<String, PipelineDetailStats> getPerPipelineProcessorStats() { + return perPipelineProcessorStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SearchPipelineStats stats = (SearchPipelineStats) o; + return totalRequestStats.equals(stats.totalRequestStats) + && totalResponseStats.equals(stats.totalResponseStats) + && perPipelineStats.equals(stats.perPipelineStats) + && perPipelineProcessorStats.equals(stats.perPipelineProcessorStats); + } + + @Override + public int hashCode() { + return Objects.hash(totalRequestStats, totalResponseStats, perPipelineStats, perPipelineProcessorStats); + } +} diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 3d4b04889a5c9..ebd0e59599c21 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -450,12 +450,25 @@ public ClusterState execute(ClusterState currentState) { final boolean isSearchableSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match( snapshotIndexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()) ); + final boolean isRemoteStoreShallowCopy = Boolean.TRUE.equals( + snapshotInfo.isRemoteStoreIndexShallowCopyEnabled() + ) && metadata.index(index).getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false); + if (isRemoteStoreShallowCopy && !currentState.getNodes().getMinNodeVersion().onOrAfter(Version.V_2_9_0)) { + throw new SnapshotRestoreException( + snapshot, + "cannot restore shallow copy snapshot for index [" + + index + + "] as some of the nodes in cluster have version less than 2.9" + ); + } final SnapshotRecoverySource recoverySource = new SnapshotRecoverySource( restoreUUID, snapshot, snapshotInfo.version(), repositoryData.resolveIndexId(index), - isSearchableSnapshot + isSearchableSnapshot, + isRemoteStoreShallowCopy, + request.getSourceRemoteStoreRepository() ); final Version minIndexCompatibilityVersion; if (isSearchableSnapshot && isSearchableSnapshotsExtendedCompatibilityEnabled()) { diff --git a/server/src/main/java/org/opensearch/tasks/CancellableTask.java b/server/src/main/java/org/opensearch/tasks/CancellableTask.java index d32d04f006bbd..dc28c26700e6c 100644 --- a/server/src/main/java/org/opensearch/tasks/CancellableTask.java +++ b/server/src/main/java/org/opensearch/tasks/CancellableTask.java @@ -33,10 +33,10 @@ package org.opensearch.tasks; import org.opensearch.common.Nullable; +import org.opensearch.common.SetOnce; import org.opensearch.common.unit.TimeValue; import java.util.Map; -import java.util.concurrent.atomic.AtomicBoolean; import static org.opensearch.search.SearchService.NO_TIMEOUT; @@ -47,17 +47,26 @@ */ public abstract class CancellableTask extends Task { - private volatile String reason; - private final AtomicBoolean cancelled = new AtomicBoolean(false); + private static class CancelledInfo { + String reason; + /** + * The time this task was cancelled as a wall clock time since epoch ({@link System#currentTimeMillis()} style). + */ + Long cancellationStartTime; + /** + * The time this task was cancelled as a relative time ({@link System#nanoTime()} style).
+ */ + Long cancellationStartTimeNanos; + + public CancelledInfo(String reason) { + this.reason = reason; + this.cancellationStartTime = System.currentTimeMillis(); + this.cancellationStartTimeNanos = System.nanoTime(); + } + } + + private final SetOnce<CancelledInfo> cancelledInfo = new SetOnce<>(); + private final TimeValue cancelAfterTimeInterval; - /** - * The time this task was cancelled as a wall clock time since epoch ({@link System#currentTimeMillis()} style). - */ - private Long cancellationStartTime = null; - /** - * The time this task was cancelled as a relative time ({@link System#nanoTime()} style). - */ - private Long cancellationStartTimeNanos = null; public CancellableTask(long id, String type, String action, String description, TaskId parentTaskId, Map<String, String> headers) { this(id, type, action, description, parentTaskId, headers, NO_TIMEOUT); @@ -81,14 +90,29 @@ public CancellableTask( */ public void cancel(String reason) { assert reason != null; - if (cancelled.compareAndSet(false, true)) { - this.cancellationStartTime = System.currentTimeMillis(); - this.cancellationStartTimeNanos = System.nanoTime(); - this.reason = reason; + if (cancelledInfo.trySet(new CancelledInfo(reason))) { onCancelled(); } } + public boolean isCancelled() { + return cancelledInfo.get() != null; + } + + /** + * Returns true if this task can potentially have children that need to be cancelled when its parent is cancelled. + */ + public abstract boolean shouldCancelChildrenOnCancellation(); + + public TimeValue getCancellationTimeout() { + return cancelAfterTimeInterval; + } + + /** + * Called after the task is cancelled so that it can take any actions that it has to take. + */ + protected void onCancelled() {} + /** * Returns true if this task should be automatically cancelled if the coordinating node that * requested this task left the cluster. @@ -97,37 +121,24 @@ public boolean cancelOnParentLeaving() { return true; } + @Nullable public Long getCancellationStartTime() { - return cancellationStartTime; + CancelledInfo info = cancelledInfo.get(); + return (info != null) ? info.cancellationStartTime : null; } + @Nullable public Long getCancellationStartTimeNanos() { - return cancellationStartTimeNanos; - } - - /** - * Returns true if this task can potentially have children that need to be cancelled when it parent is cancelled. - */ - public abstract boolean shouldCancelChildrenOnCancellation(); - - public boolean isCancelled() { - return cancelled.get(); - } - - public TimeValue getCancellationTimeout() { - return cancelAfterTimeInterval; + CancelledInfo info = cancelledInfo.get(); + return (info != null) ? info.cancellationStartTimeNanos : null; } /** * The reason the task was cancelled or null if it hasn't been cancelled. */ @Nullable - public final String getReasonCancelled() { - return reason; + public String getReasonCancelled() { + CancelledInfo info = cancelledInfo.get(); + return (info != null) ? info.reason : null; } - - /** - * Called after the task is cancelled so that it can take any actions that it has to take. - */ - protected void onCancelled() {} }
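The net effect of the SetOnce refactor above: cancellation becomes a single atomic publication, so the reason and both start timestamps always come from the same winning cancel() call, and onCancelled() runs exactly once. A hedged behavioral sketch (the anonymous subclass is hypothetical; shouldCancelChildrenOnCancellation() is the only abstract method):

    CancellableTask task = new CancellableTask(1, "transport", "indices:data/read/search", "demo", TaskId.EMPTY_TASK_ID, Map.of()) {
        @Override
        public boolean shouldCancelChildrenOnCancellation() {
            return true;
        }
    };
    task.cancel("user requested"); // wins: records reason + wall-clock/nano start times, fires onCancelled()
    task.cancel("timeout");        // no-op: SetOnce.trySet(...) returns false for later callers
    assert task.isCancelled();
    assert "user requested".equals(task.getReasonCancelled());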
diff --git a/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java b/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java new file mode 100644 index 0000000000000..ed111b34f048f --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.opensearch.common.util.concurrent.ThreadContextStatePropagator; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +/** + * Propagates TASK_ID across thread contexts + */ +public class TaskThreadContextStatePropagator implements ThreadContextStatePropagator { + @Override + public Map<String, Object> transients(Map<String, Object> source) { + final Map<String, Object> transients = new HashMap<>(); + + if (source.containsKey(TASK_ID)) { + transients.put(TASK_ID, source.get(TASK_ID)); + } + + return transients; + } + + @Override + public Map<String, String> headers(Map<String, Object> source) { + return Collections.emptyMap(); + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/TelemetryModule.java b/server/src/main/java/org/opensearch/telemetry/TelemetryModule.java new file mode 100644 index 0000000000000..604c111b0720c --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/TelemetryModule.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry; + +import org.opensearch.plugins.TelemetryPlugin; + +import java.util.List; +import java.util.Optional; + +/** + * A module for loading classes for telemetry + * + * @opensearch.internal + */ +public class TelemetryModule { + + private Telemetry telemetry; + + public TelemetryModule(List<TelemetryPlugin> telemetryPlugins, TelemetrySettings telemetrySettings) { + + for (TelemetryPlugin telemetryPlugin : telemetryPlugins) { + Optional<Telemetry> telemetry = telemetryPlugin.getTelemetry(telemetrySettings); + if (telemetry.isPresent()) { + registerTelemetry(telemetry.get()); + } + } + } + + public Optional<Telemetry> getTelemetry() { + return Optional.ofNullable(telemetry); + } + + private void registerTelemetry(Telemetry factory) { + if (telemetry == null) { + telemetry = factory; + } else { + throw new IllegalArgumentException("Cannot register more than one telemetry"); + } + } + +}
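TelemetryModule accepts at most one Telemetry provider and fails fast on a second one. A small sketch of that contract (the plugin instances here are hypothetical):

    // One provider: resolved and exposed via Optional.
    TelemetryModule module = new TelemetryModule(List.of(otelTelemetryPlugin), telemetrySettings);
    assert module.getTelemetry().isPresent();

    // Two providers: the second registerTelemetry(...) call above throws
    // IllegalArgumentException("Cannot register more than one telemetry").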
diff --git a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java new file mode 100644 index 0000000000000..7c9e0d5ac8097 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; + +/** + * Wrapper class to encapsulate tracing related settings + */ +public class TelemetrySettings { + public static final Setting<Boolean> TRACER_ENABLED_SETTING = Setting.boolSetting( + "telemetry.tracer.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private volatile boolean tracingEnabled; + + public TelemetrySettings(Settings settings, ClusterSettings clusterSettings) { + this.tracingEnabled = TRACER_ENABLED_SETTING.get(settings); + + clusterSettings.addSettingsUpdateConsumer(TRACER_ENABLED_SETTING, this::setTracingEnabled); + } + + public void setTracingEnabled(boolean tracingEnabled) { + this.tracingEnabled = tracingEnabled; + } + + public boolean isTracingEnabled() { + return tracingEnabled; + } + +} diff --git a/server/src/main/java/org/opensearch/telemetry/package-info.java b/server/src/main/java/org/opensearch/telemetry/package-info.java new file mode 100644 index 0000000000000..4545f0ef5990e --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for telemetry. + */ +package org.opensearch.telemetry; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java b/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java new file mode 100644 index 0000000000000..3d7f8133788ce --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.telemetry.tracing.noop.NoopTracer; + +import java.util.Optional; + +/** + * No-op implementation of TracerFactory + */ +public class NoopTracerFactory extends TracerFactory { + public NoopTracerFactory() { + super(null, Optional.empty(), null); + } + + @Override + public Tracer getTracer() { + return NoopTracer.INSTANCE; + } + + @Override + public void close() { + + } + +}
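Because TRACER_ENABLED_SETTING is declared Dynamic, it can be flipped at runtime through cluster settings. A hedged sketch of that flow, assuming the setting is registered with the ClusterSettings instance (applySettings is how tests typically simulate a dynamic update):

    ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TelemetrySettings.TRACER_ENABLED_SETTING));
    TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings);
    assert telemetrySettings.isTracingEnabled() == false; // default is false

    // A dynamic update invokes the consumer registered in the constructor.
    clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.enabled", true).build());
    assert telemetrySettings.isTracingEnabled();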
diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java new file mode 100644 index 0000000000000..0d0b795fdc715 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextStatePropagator; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +/** + * Core's ThreadContext based TracerContextStorage implementation + */ +public class ThreadContextBasedTracerContextStorage implements TracerContextStorage<String, Span>, ThreadContextStatePropagator { + + private final ThreadContext threadContext; + + private final TracingTelemetry tracingTelemetry; + + public ThreadContextBasedTracerContextStorage(ThreadContext threadContext, TracingTelemetry tracingTelemetry) { + this.threadContext = Objects.requireNonNull(threadContext); + this.tracingTelemetry = Objects.requireNonNull(tracingTelemetry); + this.threadContext.registerThreadContextStatePropagator(this); + } + + @Override + public Span get(String key) { + return getCurrentSpan(key); + } + + @Override + public void put(String key, Span span) { + if (span == null) { + return; + } + SpanReference currentSpanRef = threadContext.getTransient(key); + if (currentSpanRef == null) { + threadContext.putTransient(key, new SpanReference(span)); + } else { + currentSpanRef.setSpan(span); + } + } + + @Override + public Map<String, Object> transients(Map<String, Object> source) { + final Map<String, Object> transients = new HashMap<>(); + + if (source.containsKey(CURRENT_SPAN)) { + final SpanReference current = (SpanReference) source.get(CURRENT_SPAN); + if (current != null) { + transients.put(CURRENT_SPAN, new SpanReference(current.getSpan())); + } + } + + return transients; + } + + @Override + public Map<String, String> headers(Map<String, Object> source) { + final Map<String, String> headers = new HashMap<>(); + + if (source.containsKey(CURRENT_SPAN)) { + final SpanReference current = (SpanReference) source.get(CURRENT_SPAN); + if (current != null) { + tracingTelemetry.getContextPropagator().inject(current.getSpan(), (key, value) -> headers.put(key, value)); + } + } + + return headers; + } + + Span getCurrentSpan(String key) { + Optional<Span> optionalSpanFromContext = spanFromThreadContext(key); + return optionalSpanFromContext.orElse(spanFromHeader()); + } + + private Optional<Span> spanFromThreadContext(String key) { + SpanReference currentSpanRef = threadContext.getTransient(key); + return (currentSpanRef == null) ? Optional.empty() : Optional.ofNullable(currentSpanRef.getSpan()); + } + + private Span spanFromHeader() { + return tracingTelemetry.getContextPropagator().extract(threadContext.getHeaders()); + } +}
diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java b/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java new file mode 100644 index 0000000000000..8228cded4c822 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Optional; + +/** + * TracerFactory represents a single global class that is used to access tracers. + * + * The Tracer singleton object can be retrieved using tracerFactory.getTracer(). The TracerFactory object + * is created during class initialization and cannot subsequently be changed. + */ +public class TracerFactory implements Closeable { + + private static final Logger logger = LogManager.getLogger(TracerFactory.class); + + private final TelemetrySettings telemetrySettings; + private final Tracer defaultTracer; + + public TracerFactory(TelemetrySettings telemetrySettings, Optional<Telemetry> telemetry, ThreadContext threadContext) { + this.telemetrySettings = telemetrySettings; + this.defaultTracer = telemetry.map(Telemetry::getTracingTelemetry) + .map(tracingTelemetry -> createDefaultTracer(tracingTelemetry, threadContext)) + .orElse(NoopTracer.INSTANCE); + } + + /** + * Returns the tracer instance + * @return tracer instance + */ + public Tracer getTracer() { + return telemetrySettings.isTracingEnabled() ? defaultTracer : NoopTracer.INSTANCE; + } + + /** + * Closes the {@link Tracer} + */ + @Override + public void close() { + try { + defaultTracer.close(); + } catch (IOException e) { + logger.warn("Error closing tracer", e); + } + } + + private Tracer createDefaultTracer(TracingTelemetry tracingTelemetry, ThreadContext threadContext) { + TracerContextStorage<String, Span> tracerContextStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + return new DefaultTracer(tracingTelemetry, tracerContextStorage); + } + +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/package-info.java b/server/src/main/java/org/opensearch/telemetry/tracing/package-info.java new file mode 100644 index 0000000000000..4ac1e4c212c81 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests. + */ +package org.opensearch.telemetry.tracing;
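Putting the tracing pieces together, a hedged wiring sketch: a node resolves telemetry from plugins via TelemetryModule, builds a TracerFactory around it (or the no-op variant), and callers obtain tracers per lookup so the dynamic enable/disable setting is honored. telemetryPlugins, telemetrySettings, and threadPool are assumed to be in scope:

    TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings);
    TracerFactory tracerFactory = telemetryModule.getTelemetry().isPresent()
        ? new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext())
        : new NoopTracerFactory();

    // NoopTracer.INSTANCE while telemetry.tracer.enabled is false; the real
    // tracer is handed out once the setting is enabled.
    Tracer tracer = tracerFactory.getTracer();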
diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 2c91d5aa33090..ebc68c288e25a 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -330,6 +330,19 @@ public long relativeTimeInNanos() { return cachedTimeThread.relativeTimeInNanos(); } + /** + * Returns a value of nanoseconds that may be used for relative time calculations + * that require the highest precision possible. Performance critical code must use + * either {@link #relativeTimeInNanos()} or {@link #relativeTimeInMillis()} which + * give better performance at the cost of lower precision. + * + * This method should only be used for calculating time deltas. For an epoch based + * timestamp, see {@link #absoluteTimeInMillis()}. + */ + public long preciseRelativeTimeInNanos() { + return System.nanoTime(); + } + /** * Returns the value of milliseconds since UNIX epoch. * diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index f41112d69196b..021bf39fc5a1f 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -544,7 +544,10 @@ public ConnectionManager.ConnectionValidator connectionValidatorForExtensionConn ) { return (newConnection, actualProfile, listener) -> { // We don't validate cluster names to allow for CCS connections. - threadPool.getThreadContext().putHeader("extension_unique_id", extensionUniqueId); + String currentId = threadPool.getThreadContext().getHeader("extension_unique_id"); + if (Strings.isNullOrEmpty(currentId) || !extensionUniqueId.equals(currentId)) { + threadPool.getThreadContext().putHeader("extension_unique_id", extensionUniqueId); + } handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { final DiscoveryNode remote = resp.discoveryNode; diff --git a/sandbox/plugins/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec similarity index 100% rename from sandbox/plugins/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec rename to server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec diff --git a/server/src/test/java/org/opensearch/action/ActionModuleTests.java b/server/src/test/java/org/opensearch/action/ActionModuleTests.java index 66af9ebfd814f..109c60aa1e4f1 100644 --- a/server/src/test/java/org/opensearch/action/ActionModuleTests.java +++ b/server/src/test/java/org/opensearch/action/ActionModuleTests.java @@ -46,6 +46,7 @@ import org.opensearch.common.settings.SettingsFilter; import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.extensions.ExtensionsManager; import org.opensearch.identity.IdentityService; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.ActionPlugin.ActionHandler; @@ -65,6 +66,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import java.util.function.Supplier; import static java.util.Collections.emptyList; @@ -124,7 +126,7 @@ protected FakeAction() { ); } - public void testSetupRestHandlerContainsKnownBuiltin() { + public void testSetupRestHandlerContainsKnownBuiltin() throws IOException { SettingsModule settings = new SettingsModule(Settings.EMPTY); UsageService usageService = new UsageService(); ActionModule actionModule = new ActionModule( @@ -139,7 +141,8 @@ public void testSetupRestHandlerContainsKnownBuiltin() { null, usageService, null, - new IdentityService(Settings.EMPTY, new ArrayList<>()) + new IdentityService(Settings.EMPTY, new ArrayList<>()), + new ExtensionsManager(Set.of()) ); actionModule.initRestHandlers(null); // At this point the easiest way to confirm that a handler is loaded is to try to register another one on top of it and to fail @@ -196,6 +199,7 @@ public String getName() { null, usageService, null, + null, null ); Exception e = expectThrows(IllegalArgumentException.class, () -> actionModule.initRestHandlers(null)); @@ -246,6 +250,7 @@ public List<RestHandler> getRestHandlers( null, usageService, null, +
null, null ); actionModule.initRestHandlers(null); diff --git a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java index a5b4f91ff1ed5..963d47df3baff 100644 --- a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java +++ b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java @@ -18,7 +18,7 @@ import org.opensearch.extensions.action.ExtensionTransportAction; import org.opensearch.rest.NamedRoute; import org.opensearch.rest.RestRequest; -import org.opensearch.rest.extensions.RestSendToExtensionAction; +import org.opensearch.extensions.rest.RestSendToExtensionAction; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index d99b93b780140..1e2e085333e50 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.service.ClusterManagerThrottlingStats; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.metrics.OperationStats; import org.opensearch.discovery.DiscoveryStats; import org.opensearch.cluster.coordination.PendingClusterStateStats; import org.opensearch.cluster.coordination.PublishClusterStateStats; @@ -338,40 +339,31 @@ public void testSerialization() throws IOException { if (ingestStats == null) { assertNull(deserializedIngestStats); } else { - IngestStats.Stats totalStats = ingestStats.getTotalStats(); - assertEquals(totalStats.getIngestCount(), deserializedIngestStats.getTotalStats().getIngestCount()); - assertEquals(totalStats.getIngestCurrent(), deserializedIngestStats.getTotalStats().getIngestCurrent()); - assertEquals(totalStats.getIngestFailedCount(), deserializedIngestStats.getTotalStats().getIngestFailedCount()); - assertEquals(totalStats.getIngestTimeInMillis(), deserializedIngestStats.getTotalStats().getIngestTimeInMillis()); + OperationStats totalStats = ingestStats.getTotalStats(); + assertEquals(totalStats.getCount(), deserializedIngestStats.getTotalStats().getCount()); + assertEquals(totalStats.getCurrent(), deserializedIngestStats.getTotalStats().getCurrent()); + assertEquals(totalStats.getFailedCount(), deserializedIngestStats.getTotalStats().getFailedCount()); + assertEquals(totalStats.getTotalTimeInMillis(), deserializedIngestStats.getTotalStats().getTotalTimeInMillis()); assertEquals(ingestStats.getPipelineStats().size(), deserializedIngestStats.getPipelineStats().size()); for (IngestStats.PipelineStat pipelineStat : ingestStats.getPipelineStats()) { String pipelineId = pipelineStat.getPipelineId(); - IngestStats.Stats deserializedPipelineStats = getPipelineStats( - deserializedIngestStats.getPipelineStats(), - pipelineId - ); - assertEquals(pipelineStat.getStats().getIngestFailedCount(), deserializedPipelineStats.getIngestFailedCount()); - assertEquals(pipelineStat.getStats().getIngestTimeInMillis(), deserializedPipelineStats.getIngestTimeInMillis()); - assertEquals(pipelineStat.getStats().getIngestCurrent(), deserializedPipelineStats.getIngestCurrent()); - 
assertEquals(pipelineStat.getStats().getIngestCount(), deserializedPipelineStats.getIngestCount()); + OperationStats deserializedPipelineStats = getPipelineStats(deserializedIngestStats.getPipelineStats(), pipelineId); + assertEquals(pipelineStat.getStats().getFailedCount(), deserializedPipelineStats.getFailedCount()); + assertEquals(pipelineStat.getStats().getTotalTimeInMillis(), deserializedPipelineStats.getTotalTimeInMillis()); + assertEquals(pipelineStat.getStats().getCurrent(), deserializedPipelineStats.getCurrent()); + assertEquals(pipelineStat.getStats().getCount(), deserializedPipelineStats.getCount()); List<IngestStats.ProcessorStat> processorStats = ingestStats.getProcessorStats().get(pipelineId); // intentionally validating identical order Iterator<IngestStats.ProcessorStat> it = deserializedIngestStats.getProcessorStats().get(pipelineId).iterator(); for (IngestStats.ProcessorStat processorStat : processorStats) { IngestStats.ProcessorStat deserializedProcessorStat = it.next(); + assertEquals(processorStat.getStats().getFailedCount(), deserializedProcessorStat.getStats().getFailedCount()); assertEquals( - processorStat.getStats().getIngestFailedCount(), - deserializedProcessorStat.getStats().getIngestFailedCount() - ); - assertEquals( - processorStat.getStats().getIngestTimeInMillis(), - deserializedProcessorStat.getStats().getIngestTimeInMillis() + processorStat.getStats().getTotalTimeInMillis(), + deserializedProcessorStat.getStats().getTotalTimeInMillis() ); - assertEquals( - processorStat.getStats().getIngestCurrent(), - deserializedProcessorStat.getStats().getIngestCurrent() - ); - assertEquals(processorStat.getStats().getIngestCount(), deserializedProcessorStat.getStats().getIngestCount()); + assertEquals(processorStat.getStats().getCurrent(), deserializedProcessorStat.getStats().getCurrent()); + assertEquals(processorStat.getStats().getCount(), deserializedProcessorStat.getStats().getCount()); } assertFalse(it.hasNext()); } @@ -650,7 +642,7 @@ public static NodeStats createNodeStats() { : null; IngestStats ingestStats = null; if (frequently()) { - IngestStats.Stats totalStats = new IngestStats.Stats( + OperationStats totalStats = new OperationStats( randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), @@ -665,7 +657,7 @@ public static NodeStats createNodeStats() { ingestPipelineStats.add( new IngestStats.PipelineStat( pipelineId, - new IngestStats.Stats( + new OperationStats( randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), @@ -676,7 +668,7 @@ List<IngestStats.ProcessorStat> processorPerPipeline = new ArrayList<>(numProcessors); for (int j = 0; j < numProcessors; j++) { - IngestStats.Stats processorStats = new IngestStats.Stats( + OperationStats processorStats = new OperationStats( randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), @@ -750,11 +742,12 @@ public static NodeStats createNodeStats() { clusterManagerThrottlingStats, weightedRoutingStats, null, + null, null ); } - private IngestStats.Stats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) { + private OperationStats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) { return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java index 737e7b2e4887b..bb55ac810ed09 100644 ---
a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java @@ -112,6 +112,10 @@ private RestoreSnapshotRequest randomState(RestoreSnapshotRequest instance) { instance.snapshotUuid(randomBoolean() ? null : randomAlphaOfLength(10)); } + if (randomBoolean()) { + instance.setSourceRemoteStoreRepository(randomAlphaOfLengthBetween(5, 10)); + } + return instance; } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java index d3a40868bc389..627ada7092273 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -89,15 +89,15 @@ public void testIngestStats() throws Exception { processorStats.compute(stat.getType(), (key, value) -> { if (value == null) { return new long[] { - stat.getStats().getIngestCount(), - stat.getStats().getIngestFailedCount(), - stat.getStats().getIngestCurrent(), - stat.getStats().getIngestTimeInMillis() }; + stat.getStats().getCount(), + stat.getStats().getFailedCount(), + stat.getStats().getCurrent(), + stat.getStats().getTotalTimeInMillis() }; } else { - value[0] += stat.getStats().getIngestCount(); - value[1] += stat.getStats().getIngestFailedCount(); - value[2] += stat.getStats().getIngestCurrent(); - value[3] += stat.getStats().getIngestTimeInMillis(); + value[0] += stat.getStats().getCount(); + value[1] += stat.getStats().getFailedCount(); + value[2] += stat.getStats().getCurrent(); + value[3] += stat.getStats().getTotalTimeInMillis(); return value; } }); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java index 85aa60d6f308b..57a41f08f73f2 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java @@ -85,7 +85,7 @@ public void testOnShardOperation() throws IOException { when(shardRouting.shardId()).thenReturn(shardId); final ShardPath shardPath = ShardPath.loadFileCachePath(nodeEnvironment, shardId); final Path cacheEntryPath = shardPath.getDataPath(); - final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache(1024 * 1024 * 1024, 16, new NoopCircuitBreaker("")); + final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache(1024 * 1024, 16, new NoopCircuitBreaker("")); when(testNode.fileCache()).thenReturn(fileCache); when(testNode.getNodeEnvironment()).thenReturn(nodeEnvironment); diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index 73349d45bd5c7..e5833ea619774 100644 --- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -189,6 +189,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -214,6 +215,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -239,6 +241,7 @@ public 
void testFillDiskUsage() { null, null, null, + null, null ) ); @@ -295,6 +298,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -320,6 +324,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -345,6 +350,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ) ); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index fdef3cb5498b4..fb678e9506571 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -132,7 +132,6 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; @@ -143,6 +142,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateIndexService.getIndexNumberOfRoutingShards; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.parseV1Mappings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; @@ -1219,7 +1219,7 @@ public void testRemoteStoreNoUserOverrideConflictingReplicationTypeIndexSettings ); assertThat( exc.getMessage(), - containsString("Cannot enable [index.remote_store.enabled] when [cluster.indices.replication.strategy] is DOCUMENT") + containsString("Cannot enable [index.remote_store.enabled] when [index.replication.type] is DOCUMENT") ); } @@ -1254,7 +1254,7 @@ public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettin "true", "my-translog-repo-1", ReplicationType.SEGMENT.toString(), - null + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } @@ -1286,7 +1286,7 @@ public void testRemoteStoreNoUserOverrideIndexSettings() { "true", "my-translog-repo-1", ReplicationType.SEGMENT.toString(), - null + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } @@ -1313,7 +1313,15 @@ public void testRemoteStoreDisabledByUserIndexSettings() { randomShardLimitService(), Collections.emptySet() ); - verifyRemoteStoreIndexSettings(indexSettings, "false", null, null, null, ReplicationType.SEGMENT.toString(), null); + verifyRemoteStoreIndexSettings( + indexSettings, + "false", + null, + null, + null, + ReplicationType.SEGMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); } public void testRemoteStoreTranslogDisabledByUserIndexSettings() { @@ -1339,7 +1347,15 @@ public void 
testRemoteStoreTranslogDisabledByUserIndexSettings() { randomShardLimitService(), Collections.emptySet() ); - verifyRemoteStoreIndexSettings(indexSettings, "true", "my-segment-repo-1", "false", null, ReplicationType.SEGMENT.toString(), null); + verifyRemoteStoreIndexSettings( + indexSettings, + "true", + "my-segment-repo-1", + "false", + null, + ReplicationType.SEGMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); } public void testRemoteStoreOverrideSegmentRepoIndexSettings() { @@ -1375,7 +1391,7 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { "true", "my-translog-repo-1", ReplicationType.SEGMENT.toString(), - null + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } @@ -1409,7 +1425,7 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { "true", "my-custom-repo", ReplicationType.SEGMENT.toString(), - null + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } @@ -1436,7 +1452,15 @@ public void testRemoteStoreOverrideReplicationTypeIndexSettings() { randomShardLimitService(), Collections.emptySet() ); - verifyRemoteStoreIndexSettings(indexSettings, null, null, null, null, ReplicationType.DOCUMENT.toString(), null); + verifyRemoteStoreIndexSettings( + indexSettings, + null, + null, + null, + null, + ReplicationType.DOCUMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); } public void testBuildIndexMetadata() { @@ -1628,7 +1652,7 @@ public void testIndexSettingOverridesClusterReplicationSetting() { // Verify if index setting overrides cluster replication setting assertEquals(ReplicationType.DOCUMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); } - + private IndexTemplateMetadata addMatchingTemplate(Consumer<IndexTemplateMetadata.Builder> configurator) { IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*"); configurator.accept(builder); @@ -1681,14 +1705,14 @@ private void verifyRemoteStoreIndexSettings( String isRemoteTranslogEnabled, String remoteTranslogRepo, String replicationType, - String translogBufferInterval + TimeValue translogBufferInterval ) { assertEquals(replicationType, indexSettings.get(SETTING_REPLICATION_TYPE)); assertEquals(isRemoteSegmentEnabled, indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); assertEquals(remoteSegmentRepo, indexSettings.get(SETTING_REMOTE_STORE_REPOSITORY)); assertEquals(isRemoteTranslogEnabled, indexSettings.get(SETTING_REMOTE_TRANSLOG_STORE_ENABLED)); assertEquals(remoteTranslogRepo, indexSettings.get(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)); - assertEquals(translogBufferInterval, indexSettings.get(SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL)); + assertEquals(translogBufferInterval, INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(indexSettings)); } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/RecoverySourceTests.java b/server/src/test/java/org/opensearch/cluster/routing/RecoverySourceTests.java index e4aae52f41e68..a5c006362a20c 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RecoverySourceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RecoverySourceTests.java @@ -36,6 +36,8 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.repositories.IndexId; +import org.opensearch.snapshots.Snapshot; +import org.opensearch.snapshots.SnapshotId; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -54,6 +56,28 @@ public void testSerialization() throws IOException { assertEquals(recoverySource,
serializedRecoverySource); } + public void testSerializationSnapshotRecoverySource() throws IOException { + boolean isSearchableSnapshot = randomBoolean(); + boolean isRemoteStoreShallowCopyEnabled = randomBoolean(); + String sourceRemoteStoreRepo = "test-remote-repo"; + RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource( + UUIDs.randomBase64UUID(), + new Snapshot("repo", new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())), + Version.CURRENT, + new IndexId("some_index", UUIDs.randomBase64UUID(random())), + isSearchableSnapshot, + isRemoteStoreShallowCopyEnabled, + sourceRemoteStoreRepo + ); + BytesStreamOutput out = new BytesStreamOutput(); + recoverySource.writeTo(out); + RecoverySource serializedRecoverySource = RecoverySource.readFrom(out.bytes().streamInput()); + assertEquals(recoverySource.getType(), serializedRecoverySource.getType()); + assertEquals(recoverySource, serializedRecoverySource); + assertEquals(recoverySource.remoteStoreIndexShallowCopy(), isRemoteStoreShallowCopyEnabled); + assertEquals(recoverySource.isSearchableSnapshot(), isSearchableSnapshot); + } + public void testRecoverySourceTypeOrder() { assertEquals(RecoverySource.Type.EMPTY_STORE.ordinal(), 0); assertEquals(RecoverySource.Type.EXISTING_STORE.ordinal(), 1); diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index fb47cb8e2d65a..3c27748daa87d 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -97,14 +97,14 @@ public class MasterServiceTests extends OpenSearchTestCase { private static ThreadPool threadPool; - private static long relativeTimeInMillis; + private static long timeDiffInMillis; @BeforeClass public static void createThreadPool() { threadPool = new TestThreadPool(MasterServiceTests.class.getName()) { @Override - public long relativeTimeInMillis() { - return relativeTimeInMillis; + public long preciseRelativeTimeInNanos() { + return timeDiffInMillis * TimeValue.NSEC_PER_MSEC; } }; } @@ -119,7 +119,7 @@ public static void stopThreadPool() { @Before public void randomizeCurrentTime() { - relativeTimeInMillis = randomLongBetween(0L, 1L << 62); + timeDiffInMillis = randomLongBetween(0L, 1L << 50); } private ClusterManagerService createClusterManagerService(boolean makeClusterManager) { @@ -426,7 +426,7 @@ public void testClusterStateUpdateLogging() throws Exception { clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += TimeValue.timeValueSeconds(1).millis(); + timeDiffInMillis += TimeValue.timeValueSeconds(1).millis(); return currentState; } @@ -441,7 +441,7 @@ public void onFailure(String source, Exception e) { clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += TimeValue.timeValueSeconds(2).millis(); + timeDiffInMillis += TimeValue.timeValueSeconds(2).millis(); throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); } @@ -456,13 +456,13 @@ public void onFailure(String source, Exception e) {} clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState 
currentState) { - relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis(); + timeDiffInMillis += TimeValue.timeValueSeconds(3).millis(); return ClusterState.builder(currentState).incrementVersion().build(); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - relativeTimeInMillis += TimeValue.timeValueSeconds(4).millis(); + timeDiffInMillis += TimeValue.timeValueSeconds(4).millis(); } @Override @@ -1080,12 +1080,12 @@ public void testLongClusterStateUpdateLogging() throws Exception { final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState); clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { if (event.source().contains("test5")) { - relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + timeDiffInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); } if (event.source().contains("test6")) { - relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + timeDiffInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); throw new OpenSearchException("simulated error during slow publication which should trigger logging"); @@ -1101,7 +1101,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += randomLongBetween( + timeDiffInMillis += randomLongBetween( 0L, ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis() ); @@ -1124,7 +1124,7 @@ public void onFailure(String source, Exception e) { clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + timeDiffInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); @@ -1143,7 +1143,7 @@ public void onFailure(String source, Exception e) { clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + timeDiffInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); return ClusterState.builder(currentState).incrementVersion().build(); @@ -1162,7 +1162,7 @@ public void onFailure(String source, Exception e) { clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + timeDiffInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); return currentState;
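The tests above override preciseRelativeTimeInNanos() to drive a fake clock; in production code the method exists purely for high-precision time deltas. A minimal sketch (threadPool and the timed unit of work are assumed to be in scope):

    long startNanos = threadPool.preciseRelativeTimeInNanos();
    runClusterStateUpdate(); // hypothetical work being timed
    TimeValue took = TimeValue.timeValueNanos(threadPool.preciseRelativeTimeInNanos() - startNanos);
    // Relative readings only: valid for subtraction, never as an epoch timestamp.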
diff --git a/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java index 6c36368bfe446..f139a5d4e3bb1 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java @@ -34,6 +34,9 @@ import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; import org.apache.lucene.tests.mockfile.FilterSeekableByteChannel; import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.action.ActionListener; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.PathUtilsForTesting; @@ -54,10 +57,14 @@ import java.nio.file.Path; import java.nio.file.attribute.FileAttribute; import java.nio.file.spi.FileSystemProvider; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import java.util.Locale; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -118,6 +125,79 @@ public void testIsTempBlobName() { assertThat(FsBlobContainer.isTempBlobName(tempBlobName), is(true)); } + private void testListBlobsByPrefixInSortedOrder(int limit, BlobContainer.BlobNameSortOrder blobNameSortOrder) throws IOException { + + final Path path = PathUtils.get(createTempDir().toString()); + + List<String> blobsInFileSystem = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + final String blobName = randomAlphaOfLengthBetween(1, 20).toLowerCase(Locale.ROOT); + final byte[] blobData = randomByteArrayOfLength(randomIntBetween(1, frequently() ?
512 : 1 << 20)); // rarely up to 1mb + Files.write(path.resolve(blobName), blobData); + blobsInFileSystem.add(blobName); + } + + final FsBlobContainer container = new FsBlobContainer( + new FsBlobStore(randomIntBetween(1, 8) * 1024, path, false), + BlobPath.cleanPath(), + path + ); + + if (limit >= 0) { + container.listBlobsByPrefixInSortedOrder(null, limit, blobNameSortOrder, new ActionListener<>() { + @Override + public void onResponse(List<BlobMetadata> blobMetadata) { + int actualLimit = Math.min(limit, 10); + assertEquals(actualLimit, blobMetadata.size()); + + if (blobNameSortOrder == BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC) { + blobsInFileSystem.sort(String::compareTo); + } else { + blobsInFileSystem.sort(Collections.reverseOrder(String::compareTo)); + } + List<String> keys = blobsInFileSystem.subList(0, actualLimit); + assertEquals(keys, blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList())); + } + + @Override + public void onFailure(Exception e) { + fail("blobContainer.listBlobsByPrefixInLexicographicOrder failed with exception: " + e.getMessage()); + } + }); + } else { + assertThrows( + IllegalArgumentException.class, + () -> container.listBlobsByPrefixInSortedOrder(null, limit, blobNameSortOrder, new ActionListener<>() { + @Override + public void onResponse(List<BlobMetadata> blobMetadata) {} + + @Override + public void onFailure(Exception e) {} + }) + ); + } + } + + public void testListBlobsByPrefixInLexicographicOrderWithNegativeLimit() throws IOException { + testListBlobsByPrefixInSortedOrder(-5, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithZeroLimit() throws IOException { + testListBlobsByPrefixInSortedOrder(0, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithLimitLessThanNumberOfRecords() throws IOException { + testListBlobsByPrefixInSortedOrder(8, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithLimitNumberOfRecords() throws IOException { + testListBlobsByPrefixInSortedOrder(10, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberOfRecords() throws IOException { + testListBlobsByPrefixInSortedOrder(12, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + static class MockFileSystemProvider extends FilterFileSystemProvider { final Consumer<Long> onRead; diff --git a/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java index 1dcb8ee00ebb2..0c5cece4249ef 100644 --- a/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java @@ -1462,4 +1462,20 @@ public List getListValue(final List value) { ); } + public void testAddSettingsUpdateConsumer() { + Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(testSetting, testSetting2))); + AtomicInteger consumer2 = new AtomicInteger(); + service.addSettingsUpdateConsumer(testSetting2, consumer2::set, (s) -> assertTrue(s > 0)); + Setting<Integer> wrongKeySetting = Setting.intSetting("foo.bar.wrong", 1, Property.Dynamic, Property.NodeScope); + + 
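// Editor's note, not part of the original patch: the two expectThrows calls below lean on
// AbstractScopedSettings validation. "foo.bar.wrong" was never registered with the
// ClusterSettings instance built above (only testSetting and testSetting2 were), so
// registering a consumer for it fails with a SettingsException, and passing a null
// setting fails fast with a NullPointerException.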
expectThrows(SettingsException.class, () -> service.addSettingsUpdateConsumer(wrongKeySetting, consumer2::set, (i) -> { + if (i == 42) throw new AssertionError("wrong key"); + })); + + expectThrows(NullPointerException.class, () -> service.addSettingsUpdateConsumer(null, consumer2::set, (i) -> { + if (i == 42) throw new AssertionError("empty key"); + })); + } } diff --git a/server/src/test/java/org/opensearch/common/util/ReorganizingLongHashTests.java b/server/src/test/java/org/opensearch/common/util/ReorganizingLongHashTests.java new file mode 100644 index 0000000000000..259eab6c624bd --- /dev/null +++ b/server/src/test/java/org/opensearch/common/util/ReorganizingLongHashTests.java @@ -0,0 +1,146 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +public class ReorganizingLongHashTests extends OpenSearchTestCase { + + public void testFuzzy() { + Map<Long, Long> reference = new HashMap<>(); + + try ( + ReorganizingLongHash h = new ReorganizingLongHash( + randomIntBetween(1, 100), // random capacity + 0.6f + randomFloat() * 0.39f, // random load factor to verify collision resolution + BigArrays.NON_RECYCLING_INSTANCE + ) + ) { + // Verify the behaviour of "add" and "find". + for (int i = 0; i < (1 << 20); i++) { + long key = randomLong() % (1 << 12); // roughly ~4% unique keys + if (reference.containsKey(key)) { + long expectedOrdinal = reference.get(key); + assertEquals(-1 - expectedOrdinal, h.add(key)); + assertEquals(expectedOrdinal, h.find(key)); + } else { + assertEquals(-1, h.find(key)); + reference.put(key, (long) reference.size()); + assertEquals((long) reference.get(key), h.add(key)); + } + } + + // Verify the behaviour of "get". + for (Map.Entry<Long, Long> entry : reference.entrySet()) { + assertEquals((long) entry.getKey(), h.get(entry.getValue())); + } + + // Verify the behaviour of "size". + assertEquals(reference.size(), h.size()); + + // Verify the calculation of PSLs. + final long capacity = h.getTable().size(); + final long mask = capacity - 1; + for (long idx = 0; idx < h.getTable().size(); idx++) { + final long value = h.getTable().get(idx); + if (value != -1) { + final long homeIdx = h.hash(h.get((int) value)) & mask; + assertEquals((capacity + idx - homeIdx) & mask, value >>> 48); + } + } + } + } + + public void testRearrangement() { + try (ReorganizingLongHash h = new ReorganizingLongHash(4, 0.6f, BigArrays.NON_RECYCLING_INSTANCE) { + /** + * Overriding with an "identity" hash function to make it easier to reason about the placement + * of values in the hash table. The backing array of the hash table will have a size (8), + * i.e. nextPowerOfTwo(initialCapacity/loadFactor), so the bitmask will be (7). + * The ideal home slot of a key can then be defined as: (hash(key) & mask) = (key & 7). + */ + @Override + long hash(long key) { + return key; + } + }) { + /* + * Add key=0, hash=0, home_slot=0 + * + * Before: empty slot. + * ▼ + * [ _ _ _ _ _ _ _ _ ] + * + * After: inserted [ordinal=0, psl=0] at the empty slot. + * [ 0 _ _ _ _ _ _ _ ] + */ + h.add(0); + assertEquals(encodeValue(0, 0, 0), h.getTable().get(0)); + + /* + * Add key=8, hash=8, home_slot=0 + * + * Before: occupied slot.
+ * ▼ + * [ 0 _ _ _ _ _ _ _ ] + * + * After: inserted [ordinal=1, psl=0] at the existing slot, displaced [ordinal=0, psl=0], + * and re-inserted it at the next empty slot as [ordinal=0, psl=1]. + * [ 1 0 _ _ _ _ _ _ ] + */ + h.add(8); + assertEquals(encodeValue(0, 0, 1), h.getTable().get(0)); + assertEquals(encodeValue(1, 0, 0), h.getTable().get(1)); + + /* + * Add key=1, hash=1, home_slot=1 + * + * Before: occupied slot. + * ▼ + * [ 1 0 _ _ _ _ _ _ ] + * + * After: inserted [ordinal=2, psl=0] at the existing slot, displaced [ordinal=0, psl=1], + * and re-inserted it at the next empty slot as [ordinal=0, psl=2]. + * [ 1 2 0 _ _ _ _ _ ] + */ + h.add(1); + assertEquals(encodeValue(0, 0, 1), h.getTable().get(0)); + assertEquals(encodeValue(0, 0, 2), h.getTable().get(1)); + assertEquals(encodeValue(2, 0, 0), h.getTable().get(2)); + + /* + * Add key=16, hash=16, home_slot=0 + * + * Before: occupied slot. + * ▼ + * [ 1 2 0 _ _ _ _ _ ] + * + * After: inserted [ordinal=3, psl=0] at the existing slot, displaced [ordinal=1, psl=0] + * and re-inserted it at the next best slot. Repeated this for other displaced values + * until everything found an empty slot. + * [ 3 1 0 2 _ _ _ _ ] + */ + h.add(16); + assertEquals(encodeValue(0, 0, 3), h.getTable().get(0)); + assertEquals(encodeValue(1, 0, 1), h.getTable().get(1)); + assertEquals(encodeValue(2, 0, 0), h.getTable().get(2)); + assertEquals(encodeValue(2, 0, 2), h.getTable().get(3)); + } + } + + private static long encodeValue(long psl, long fingerprint, long ordinal) { + assert psl < (1L << 15); + assert fingerprint < (1L << 16); + assert ordinal < (1L << 32); + return (psl << 48) | (fingerprint << 32) | ordinal; + } +} diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessorTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessorTests.java index f058189f904a0..c38956f965ac3 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessorTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessorTests.java @@ -58,7 +58,7 @@ public void testConsumerCanThrowExceptions() { scaledRandomIntBetween(1, 2024), threadContext, threadpool, - TimeValue.timeValueMillis(50) + () -> TimeValue.timeValueMillis(50) ) { @Override protected void write(List<Tuple<Object, Consumer<Exception>>> candidates) throws IOException { @@ -100,7 +100,7 @@ public void testPreserveThreadContext() throws InterruptedException { scaledRandomIntBetween(1, 2024), threadContext, threadpool, - TimeValue.timeValueMillis(100) + () -> TimeValue.timeValueMillis(100) ) { @Override protected void write(List<Tuple<Object, Consumer<Exception>>> candidates) throws IOException { @@ -156,7 +156,7 @@ public void testSlowConsumer() { scaledRandomIntBetween(1, 2024), threadContext, threadpool, - TimeValue.timeValueMillis(100) + () -> TimeValue.timeValueMillis(100) ) { @Override protected void write(List<Tuple<Object, Consumer<Exception>>> candidates) throws IOException { @@ -220,7 +220,7 @@ public void testConsecutiveWritesAtLeastBufferIntervalAway() throws InterruptedE scaledRandomIntBetween(1, 2024), threadContext, threadpool, - TimeValue.timeValueMillis(bufferIntervalMs) + () -> TimeValue.timeValueMillis(bufferIntervalMs) ) { @Override protected void write(List<Tuple<Object, Consumer<Exception>>> candidates) throws IOException { diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index 75a1d9ec62c82..713a70c6a7d3e 100--- 
a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -23,10 +23,7 @@ import java.io.IOException; import java.net.InetAddress; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; import java.nio.file.Path; -import java.security.AccessControlException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -40,6 +37,7 @@ import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.Before; +import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionModule; import org.opensearch.action.ActionModule.DynamicActionRegistry; @@ -52,7 +50,6 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.stream.BytesStreamInput; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.NamedWriteableRegistry; @@ -69,6 +66,7 @@ import org.opensearch.extensions.proto.ExtensionRequestProto; import org.opensearch.extensions.rest.RegisterRestActionsRequest; import org.opensearch.extensions.settings.RegisterCustomSettingsRequest; +import org.opensearch.extensions.ExtensionsSettings.Extension; import org.opensearch.identity.IdentityService; import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.plugins.ExtensionAwarePlugin; @@ -203,10 +201,37 @@ public void tearDown() throws Exception { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } - public void testDiscover() throws Exception { - Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); + public void testLoadExtensions() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + Set> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet()); + ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings); + ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings); + ExtensionDependency dependentExtension = new ExtensionDependency("uniqueid0", Version.fromString("2.0.0")); + + Extension firstExtension = new Extension( + "firstExtension", + "uniqueid1", + "127.0.0.1", + "9300", + "0.0.7", + "3.0.0", + "3.0.0", + Collections.emptyList(), + extensionScopedSettings + ); + Extension secondExtension = new Extension( + "secondExtension", + "uniqueid2", + "127.0.0.1", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(dependentExtension), + extensionScopedSettings + ); + extensionsManager.loadExtension(firstExtension); + extensionsManager.loadExtension(secondExtension); List expectedExtensions = new ArrayList(); @@ -218,7 +243,7 @@ public void testDiscover() throws Exception { new DiscoveryExtensionNode( "firstExtension", "uniqueid1", - new TransportAddress(InetAddress.getByName("127.0.0.0"), 9300), + new TransportAddress(InetAddress.getByName("127.0.0.1"), 9300), new HashMap(), Version.fromString("3.0.0"), Version.fromString("3.0.0"), @@ -252,14 +277,37 @@ public void testDiscover() throws Exception { } } - public void testNonUniqueExtensionsDiscovery() throws Exception { - Path emptyExtensionDir = createTempDir(); - List nonUniqueYmlLines = extensionsYmlLines.stream() - .map(s -> s.replace("uniqueid2", 
"uniqueid1")) - .collect(Collectors.toList()); - Files.write(emptyExtensionDir.resolve("extensions.yml"), nonUniqueYmlLines, StandardCharsets.UTF_8); + public void testNonUniqueLoadedExtensions() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(emptyExtensionDir, Set.of()); + Extension firstExtension = new Extension( + "firstExtension", + "uniqueid1", + "127.0.0.0", + "9300", + "0.0.7", + "3.0.0", + "3.0.0", + Collections.emptyList(), + null + ); + Extension secondExtension = new Extension( + "secondExtension", + "uniqueid1", + "127.0.0.0", + "9300", + "0.0.7", + "3.0.0", + "3.0.0", + null, + null + ); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + extensionsManager.loadExtension(firstExtension); + IOException exception = expectThrows(IOException.class, () -> extensionsManager.loadExtension(secondExtension)); + assertEquals( + "Duplicate uniqueId [uniqueid1]. Did not load extension: Extension [name=secondExtension, uniqueId=uniqueid1, hostAddress=127.0.0.0, port=9300, version=0.0.7, opensearchVersion=3.0.0, minimumCompatibleVersion=3.0.0]", + exception.getMessage() + ); List expectedExtensions = new ArrayList(); @@ -289,56 +337,15 @@ public void testNonUniqueExtensionsDiscovery() throws Exception { assertTrue(expectedExtensions.containsAll(emptyList())); } - public void testMissingRequiredFieldsInExtensionDiscovery() throws Exception { - Path emptyExtensionDir = createTempDir(); - ExtensionsManager extensionsManager; - List requiredFieldMissingYmlLines = extensionsYmlLines.stream() - .map(s -> s.replace(" minimumCompatibleVersion: '2.0.0'", "")) - .collect(Collectors.toList()); - Files.write(emptyExtensionDir.resolve("extensions.yml"), requiredFieldMissingYmlLines, StandardCharsets.UTF_8); + public void testMissingRequiredFieldsWhileLoadingExtension() throws Exception { - try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ExtensionsManager.class))) { + Extension firstExtension = new Extension("firstExtension", "uniqueid1", "127.0.0.0", "9300", "0.0.7", "3.0.0", "", null, null); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "Required field is missing in extensions.yml", - "org.opensearch.extensions.ExtensionsManager", - Level.WARN, - "loading extension has been failed because of exception : Extension is missing these required fields : [minimumCompatibleVersion]" - ) - ); - - extensionsManager = new ExtensionsManager(emptyExtensionDir, Set.of()); - - mockLogAppender.assertAllExpectationsMatched(); - } + IOException exception = expectThrows(IOException.class, () -> extensionsManager.loadExtension(firstExtension)); + assertEquals("Required field [minimum opensearch version] is missing in the request", exception.getMessage()); - List expectedExtensions = new ArrayList(); - - expectedExtensions.add( - new DiscoveryExtensionNode( - "firstExtension", - "uniqueid1", - new TransportAddress(InetAddress.getByName("127.0.0.0"), 9300), - new HashMap(), - Version.fromString("3.0.0"), - Version.fromString("3.0.0"), - Collections.emptyList() - ) - ); - assertEquals(expectedExtensions.size(), extensionsManager.getExtensionIdMap().values().size()); - for (DiscoveryExtensionNode extension : expectedExtensions) { - DiscoveryExtensionNode initializedExtension = extensionsManager.getExtensionIdMap().get(extension.getId()); - assertEquals(extension.getName(), initializedExtension.getName()); - 
assertEquals(extension.getId(), initializedExtension.getId()); - assertEquals(extension.getAddress(), initializedExtension.getAddress()); - assertEquals(extension.getAttributes(), initializedExtension.getAttributes()); - assertEquals(extension.getVersion(), initializedExtension.getVersion()); - assertEquals(extension.getMinimumCompatibleVersion(), initializedExtension.getMinimumCompatibleVersion()); - assertEquals(extension.getDependencies(), initializedExtension.getDependencies()); - } - assertTrue(expectedExtensions.containsAll(emptyList())); - assertTrue(expectedExtensions.containsAll(emptyList())); + assertEquals(0, extensionsManager.getExtensionIdMap().values().size()); } public void testDiscoveryExtension() throws Exception { @@ -389,49 +396,8 @@ public void testExtensionDependency() throws Exception { } } - public void testNonAccessibleDirectory() throws Exception { - AccessControlException e = expectThrows( - - AccessControlException.class, - () -> new ExtensionsManager(PathUtils.get(""), Set.of()) - ); - assertEquals("access denied (\"java.io.FilePermission\" \"\" \"read\")", e.getMessage()); - } - - public void testNoExtensionsFile() throws Exception { - Settings settings = Settings.builder().build(); - - try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ExtensionsManager.class))) { - - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "No Extensions File Present", - "org.opensearch.extensions.ExtensionsManager", - Level.WARN, - "Extensions.yml file is not present. No extensions will be loaded." - ) - ); - - new ExtensionsManager(extensionDir, Set.of()); - - mockLogAppender.assertAllExpectationsMatched(); - } - } - - public void testEmptyExtensionsFile() throws Exception { - Path emptyExtensionDir = createTempDir(); - - List emptyExtensionsYmlLines = Arrays.asList(); - Files.write(emptyExtensionDir.resolve("extensions.yml"), emptyExtensionsYmlLines, StandardCharsets.UTF_8); - - Settings settings = Settings.builder().build(); - - expectThrows(IOException.class, () -> new ExtensionsManager(emptyExtensionDir, Set.of())); - } - public void testInitialize() throws Exception { - Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); @@ -472,9 +438,8 @@ public void testInitialize() throws Exception { } public void testHandleRegisterRestActionsRequest() throws Exception { - Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; @@ -488,8 +453,7 @@ public void testHandleRegisterRestActionsRequest() throws Exception { } public void testHandleRegisterSettingsRequest() throws Exception { - Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; @@ -505,7 +469,7 @@ public void testHandleRegisterSettingsRequest() throws Exception { } public void 
testHandleRegisterRestActionsRequestWithInvalidMethod() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; @@ -520,7 +484,7 @@ public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Excep } public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; @@ -535,7 +499,7 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() th } public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; List actionsList = List.of("GET", "PUT /bar", "POST /baz"); @@ -549,7 +513,7 @@ public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exceptio } public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); @@ -563,7 +527,7 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throw } public void testHandleExtensionRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); ExtensionRequest clusterStateRequest = new ExtensionRequest(ExtensionRequestProto.RequestType.REQUEST_EXTENSION_CLUSTER_STATE); @@ -717,9 +681,7 @@ public void testEnvironmentSettingsDefaultValue() throws Exception { } public void testAddSettingsUpdateConsumerRequest() throws Exception { - Path extensionDir = createTempDir(); - Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); List> componentSettings = List.of( @@ -763,10 +725,7 @@ public void testAddSettingsUpdateConsumerRequest() throws Exception { } public void testHandleAddSettingsUpdateConsumerRequest() throws Exception { - - Path extensionDir = createTempDir(); - Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); List> componentSettings = List.of( @@ -786,9 +745,7 @@ public void testHandleAddSettingsUpdateConsumerRequest() throws Exception { } public void testUpdateSettingsRequest() throws Exception { - Path extensionDir = createTempDir(); - Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); - ExtensionsManager 
extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); initialize(extensionsManager); Setting componentSetting = Setting.boolSetting("falseSetting", false, Property.Dynamic); @@ -817,7 +774,7 @@ public void testUpdateSettingsRequest() throws Exception { public void testRegisterHandler() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); TransportService mockTransportService = spy( new TransportService( @@ -842,43 +799,50 @@ public void testRegisterHandler() throws Exception { } - public void testIncompatibleExtensionRegistration() throws IOException, IllegalAccessException { - - try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ExtensionsManager.class))) { - - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "Could not load extension with uniqueId", - "org.opensearch.extensions.ExtensionsManager", - Level.ERROR, - "Could not load extension with uniqueId uniqueid1 due to OpenSearchException[Extension minimumCompatibleVersion: 3.99.0 is greater than current" - ) - ); - - List incompatibleExtension = Arrays.asList( - "extensions:", - " - name: firstExtension", - " uniqueId: uniqueid1", - " hostAddress: '127.0.0.0'", - " port: '9300'", - " version: '0.0.7'", - " opensearchVersion: '3.0.0'", - " minimumCompatibleVersion: '3.99.0'" - ); - - Files.write(extensionDir.resolve("extensions.yml"), incompatibleExtension, StandardCharsets.UTF_8); - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, Set.of()); - assertEquals(0, extensionsManager.getExtensionIdMap().values().size()); - mockLogAppender.assertAllExpectationsMatched(); - } + public void testIncompatibleExtensionRegistration() throws IOException { + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + Extension firstExtension = new Extension( + "firstExtension", + "uniqueid1", + "127.0.0.0", + "9300", + "0.0.7", + "3.0.0", + "3.99.0", + List.of(), + null + ); + expectThrows(OpenSearchException.class, () -> extensionsManager.loadExtension(firstExtension)); + assertEquals(0, extensionsManager.getExtensionIdMap().values().size()); } public void testAdditionalExtensionSettingsForExtensionWithCustomSettingSet() throws Exception { - Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); + Setting customSetting = Setting.simpleString("custom_extension_setting", "custom_setting", Property.ExtensionScope); + ExtensionAwarePlugin extAwarePlugin = new ExtensionAwarePlugin() { + @Override + public List> getExtensionSettings() { + List> settings = new ArrayList>(); + settings.add(customSetting); + return settings; + } + }; Set> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet()); + ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings); + Extension firstExtension = new Extension( + "firstExtension", + "uniqueid1", + "127.0.0.0", + "9300", + "0.0.7", + "3.0.0", + "3.0.0", + List.of(), + extensionScopedSettings + ); - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, additionalSettings); + ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings); + extensionsManager.loadExtension(firstExtension); DiscoveryExtensionNode extension = new DiscoveryExtensionNode( 
"firstExtension", @@ -900,11 +864,23 @@ public void testAdditionalExtensionSettingsForExtensionWithCustomSettingSet() th } public void testAdditionalExtensionSettingsForExtensionWithoutCustomSettingSet() throws Exception { - Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); Set> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet()); + ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings); + Extension firstExtension = new Extension( + "secondExtension", + "uniqueid2", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + extensionScopedSettings + ); - ExtensionsManager extensionsManager = new ExtensionsManager(extensionDir, additionalSettings); + ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings); + extensionsManager.loadExtension(firstExtension); DiscoveryExtensionNode extension = new DiscoveryExtensionNode( "secondExtension", diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionActionUtilTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionUtilTests.java new file mode 100644 index 0000000000000..d2b889d33da9a --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionUtilTests.java @@ -0,0 +1,106 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.action; + +import org.junit.Before; +import org.mockito.Mockito; +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; + +import static org.opensearch.extensions.action.ExtensionActionUtil.UNIT_SEPARATOR; +import static org.opensearch.extensions.action.ExtensionActionUtil.createProxyRequestBytes; + +public class ExtensionActionUtilTests extends OpenSearchTestCase { + private byte[] myBytes; + private final String actionName = "org.opensearch.action.MyExampleRequest"; + private final byte[] actionNameBytes = MyExampleRequest.class.getName().getBytes(StandardCharsets.UTF_8); + + @Before + public void setup() throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + MyExampleRequest exampleRequest = new MyExampleRequest(actionName, actionNameBytes); + exampleRequest.writeTo(out); + + byte[] requestBytes = BytesReference.toBytes(out.bytes()); + byte[] requestClass = MyExampleRequest.class.getName().getBytes(StandardCharsets.UTF_8); + this.myBytes = ByteBuffer.allocate(requestClass.length + 1 + requestBytes.length) + .put(requestClass) + .put(UNIT_SEPARATOR) + .put(requestBytes) + .array(); + } + + public void testCreateProxyRequestBytes() throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + MyExampleRequest exampleRequest = new MyExampleRequest(actionName, actionNameBytes); + exampleRequest.writeTo(out); + + byte[] result = createProxyRequestBytes(exampleRequest); + 
assertArrayEquals(this.myBytes, result); + assertThrows(RuntimeException.class, () -> ExtensionActionUtil.createProxyRequestBytes(new MyExampleRequest(null, null))); + } + + public void testCreateActionRequest() throws ReflectiveOperationException { + ActionRequest actionRequest = ExtensionActionUtil.createActionRequest(myBytes); + assertThrows(NullPointerException.class, () -> ExtensionActionUtil.createActionRequest(null)); + assertThrows(ReflectiveOperationException.class, () -> ExtensionActionUtil.createActionRequest(actionNameBytes)); + assertNotNull(actionRequest); + assertFalse(actionRequest.getShouldStoreResult()); + } + + public void testConvertParamsToBytes() throws IOException { + Writeable mockWriteableObject = Mockito.mock(Writeable.class); + Mockito.doThrow(new IOException("Test IOException")).when(mockWriteableObject).writeTo(Mockito.any()); + assertThrows(IllegalStateException.class, () -> ExtensionActionUtil.convertParamsToBytes(mockWriteableObject)); + } + + public void testDelimPos() { + assertTrue(ExtensionActionUtil.delimPos(myBytes) > 0); + assertTrue(ExtensionActionUtil.delimPos(actionNameBytes) < 0); + assertEquals(-1, ExtensionActionUtil.delimPos(actionNameBytes)); + } + + private static class MyExampleRequest extends ActionRequest { + private final String param1; + private final byte[] param2; + + public MyExampleRequest(String param1, byte[] param2) { + this.param1 = param1; + this.param2 = param2; + } + + public MyExampleRequest(StreamInput in) throws IOException { + super(in); + param1 = in.readString(); + param2 = in.readByteArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(param1); + out.writeByteArray(param2); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } +} diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java new file mode 100644 index 0000000000000..7dd616c678e74 --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java @@ -0,0 +1,235 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions.rest; + +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +import org.junit.After; +import org.junit.Before; +import org.mockito.Mockito; +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.extensions.ExtensionsManager; +import org.opensearch.extensions.ExtensionsSettings; +import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestStatus; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.opensearch.transport.nio.MockNioTransport; + +public class RestInitializeExtensionActionTests extends OpenSearchTestCase { + + private TransportService transportService; + private MockNioTransport transport; + private final ThreadPool threadPool = new TestThreadPool(RestInitializeExtensionActionTests.class.getSimpleName()); + + @Before + public void setup() throws Exception { + Settings settings = Settings.builder().put("cluster.name", "test").build(); + transport = new MockNioTransport( + settings, + Version.CURRENT, + threadPool, + new NetworkService(Collections.emptyList()), + PageCacheRecycler.NON_RECYCLING_INSTANCE, + new NamedWriteableRegistry(Collections.emptyList()), + new NoneCircuitBreakerService() + ); + transportService = new MockTransportService( + settings, + transport, + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + (boundAddress) -> new DiscoveryNode( + "test_node", + "test_node", + boundAddress.publishAddress(), + emptyMap(), + emptySet(), + Version.CURRENT + ), + null, + Collections.emptySet() + ); + + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + transportService.close(); + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + } + + public void testRestInitializeExtensionActionResponse() throws Exception { + ExtensionsManager extensionsManager = mock(ExtensionsManager.class); + RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(extensionsManager); + final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" + + Version.CURRENT.toString() + + "\"," + + "\"minimumCompatibleVersion\":\"" + + Version.CURRENT.minimumCompatibilityVersion().toString() + + "\"}"; + RestRequest request = new 
FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) + .withMethod(RestRequest.Method.POST) + .build(); + + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + restInitializeExtensionAction.handleRequest(request, channel, null); + + assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); + } + + public void testRestInitializeExtensionActionFailure() throws Exception { + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(extensionsManager); + + final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"\",\"hostAddress\":\"127.0.0.1\"," + + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" + + Version.CURRENT.toString() + + "\"," + + "\"minimumCompatibleVersion\":\"" + + Version.CURRENT.minimumCompatibilityVersion().toString() + + "\"}"; + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) + .withMethod(RestRequest.Method.POST) + .build(); + + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + restInitializeExtensionAction.handleRequest(request, channel, null); + + assertEquals(1, channel.errors().get()); + assertTrue( + channel.capturedResponse().content().utf8ToString().contains("Required field [extension uniqueId] is missing in the request") + ); + } + + public void testRestInitializeExtensionActionResponseWithAdditionalSettings() throws Exception { + Setting boolSetting = Setting.boolSetting("boolSetting", false, Setting.Property.ExtensionScope); + Setting stringSetting = Setting.simpleString("stringSetting", "default", Setting.Property.ExtensionScope); + Setting intSetting = Setting.intSetting("intSetting", 0, Setting.Property.ExtensionScope); + Setting listSetting = Setting.listSetting( + "listSetting", + List.of("first", "second", "third"), + Function.identity(), + Setting.Property.ExtensionScope + ); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(boolSetting, stringSetting, intSetting, listSetting)); + ExtensionsManager spy = spy(extensionsManager); + + // optionally, you can stub out some methods: + when(spy.getAdditionalSettings()).thenCallRealMethod(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); + Mockito.doNothing().when(spy).initialize(); + RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); + final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" + + Version.CURRENT.toString() + + "\"," + + "\"minimumCompatibleVersion\":\"" + + Version.CURRENT.minimumCompatibilityVersion().toString() + + "\",\"boolSetting\":true,\"stringSetting\":\"customSetting\",\"intSetting\":5,\"listSetting\":[\"one\",\"two\",\"three\"]}"; + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) + .withMethod(RestRequest.Method.POST) + .build(); + + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + restInitializeExtensionAction.handleRequest(request, channel, null); + + assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); 
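// Editor's note, not part of the original patch: with loadExtension stubbed to the real
// method and initialize() stubbed to a no-op above, the handler can answer 202 ACCEPTED
// without a live extension node; the assertions below then check the acknowledgement text
// and that the custom settings supplied in the JSON body were bound to the loaded extension.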
+ assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); + + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + assertTrue(extension.isPresent()); + assertEquals(true, extension.get().getAdditionalSettings().get(boolSetting)); + assertEquals("customSetting", extension.get().getAdditionalSettings().get(stringSetting)); + assertEquals(5, extension.get().getAdditionalSettings().get(intSetting)); + + List listSettingValue = (List) extension.get().getAdditionalSettings().get(listSetting); + assertTrue(listSettingValue.contains("one")); + assertTrue(listSettingValue.contains("two")); + assertTrue(listSettingValue.contains("three")); + } + + public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsingDefault() throws Exception { + Setting boolSetting = Setting.boolSetting("boolSetting", false, Setting.Property.ExtensionScope); + Setting stringSetting = Setting.simpleString("stringSetting", "default", Setting.Property.ExtensionScope); + Setting intSetting = Setting.intSetting("intSetting", 0, Setting.Property.ExtensionScope); + Setting listSetting = Setting.listSetting( + "listSetting", + List.of("first", "second", "third"), + Function.identity(), + Setting.Property.ExtensionScope + ); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(boolSetting, stringSetting, intSetting, listSetting)); + ExtensionsManager spy = spy(extensionsManager); + + // optionally, you can stub out some methods: + when(spy.getAdditionalSettings()).thenCallRealMethod(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); + Mockito.doNothing().when(spy).initialize(); + RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); + final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" + + Version.CURRENT.toString() + + "\"," + + "\"minimumCompatibleVersion\":\"" + + Version.CURRENT.minimumCompatibilityVersion().toString() + + "\"}"; + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) + .withMethod(RestRequest.Method.POST) + .build(); + + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + restInitializeExtensionAction.handleRequest(request, channel, null); + + assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); + + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + assertTrue(extension.isPresent()); + assertEquals(false, extension.get().getAdditionalSettings().get(boolSetting)); + assertEquals("default", extension.get().getAdditionalSettings().get(stringSetting)); + assertEquals(0, extension.get().getAdditionalSettings().get(intSetting)); + + List listSettingValue = (List) extension.get().getAdditionalSettings().get(listSetting); + assertTrue(listSettingValue.contains("first")); + assertTrue(listSettingValue.contains("second")); + assertTrue(listSettingValue.contains("third")); + } + +} diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index df047afb677d9..fe8792b36f048 100644 --- 
a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -42,6 +42,7 @@ import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.extensions.DiscoveryExtensionNode; +import org.opensearch.extensions.ExtensionsManager; import org.opensearch.extensions.action.ExtensionAction; import org.opensearch.extensions.action.ExtensionTransportAction; import org.opensearch.identity.IdentityService; @@ -49,7 +50,6 @@ import org.opensearch.rest.NamedRoute; import org.opensearch.rest.RestHandler.Route; import org.opensearch.rest.RestRequest.Method; -import org.opensearch.rest.extensions.RestSendToExtensionAction; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -118,7 +118,8 @@ public void setup() throws Exception { null, usageService, null, - new IdentityService(Settings.EMPTY, new ArrayList<>()) + new IdentityService(Settings.EMPTY, new ArrayList<>()), + new ExtensionsManager(Set.of()) ); dynamicActionRegistry = actionModule.getDynamicActionRegistry(); } diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index d9d87196ca289..32b8fb5a4dc62 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -252,7 +252,7 @@ private IndexService newIndexService(IndexModule module) throws IOException { writableRegistry(), () -> false, null, - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService), + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), translogFactorySupplier ); } diff --git a/server/src/test/java/org/opensearch/index/IndexServiceTests.java b/server/src/test/java/org/opensearch/index/IndexServiceTests.java index 8b133b31e97b7..209b7eb4c5696 100644 --- a/server/src/test/java/org/opensearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexServiceTests.java @@ -502,4 +502,24 @@ public void testUpdateSyncIntervalDynamically() { indexMetadata = client().admin().cluster().prepareState().execute().actionGet().getState().metadata().index("test"); assertEquals("20s", indexMetadata.getSettings().get(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey())); } + + public void testUpdateRemoteTranslogBufferIntervalDynamically() { + Settings settings = Settings.builder().put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "10s").build(); + IndexService indexService = createIndex("test", settings); + ensureGreen("test"); + + Settings.Builder builder = Settings.builder().put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "5s"); + client().admin().indices().prepareUpdateSettings("test").setSettings(builder).get(); + IndexMetadata indexMetadata = client().admin().cluster().prepareState().execute().actionGet().getState().metadata().index("test"); + assertEquals("5s", indexMetadata.getSettings().get(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey())); + + client().admin().indices().prepareClose("test").get(); + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "20s")) + 
.get(); + indexMetadata = client().admin().cluster().prepareState().execute().actionGet().getState().metadata().index("test"); + assertEquals("20s", indexMetadata.getSettings().get(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey())); + } } diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index bf3c6e15bde6e..490d54d778477 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -326,7 +326,7 @@ public void testIsWarmerEnabled() { } public void testRefreshInterval() { - String refreshInterval = getRandomTimeString(); + String refreshInterval = getRandomTimeString(false); IndexMetadata metadata = newIndexMeta( "index", Settings.builder() @@ -343,7 +343,7 @@ public void testRefreshInterval() { ), settings.getRefreshInterval() ); - String newRefreshInterval = getRandomTimeString(); + String newRefreshInterval = getRandomTimeString(false); settings.updateIndexMetadata( newIndexMeta("index", Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), newRefreshInterval).build()) ); @@ -357,8 +357,12 @@ public void testRefreshInterval() { ); } - private String getRandomTimeString() { - int refreshIntervalInt = randomFrom(-1, Math.abs(randomInt())); + private String getRandomTimeString(boolean nonNegativeOnly) { + int start = -1; + if (nonNegativeOnly) { + start = 0; + } + int refreshIntervalInt = randomFrom(start, Math.abs(randomInt())); String refreshInterval = Integer.toString(refreshIntervalInt); if (refreshIntervalInt >= 0) { refreshInterval += randomFrom("s", "ms", "h"); @@ -965,7 +969,7 @@ public void testRemoteTranslogExplicitSetting() { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true) .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "tlog-store") - .put(IndexMetadata.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "200ms") + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "200ms") .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); @@ -1009,38 +1013,57 @@ public void testSetRemoteTranslogBufferIntervalDefaultSetting() { .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion) .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true) .build(); - assertEquals(TimeValue.timeValueMillis(100), IndexMetadata.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings)); + assertEquals(TimeValue.timeValueMillis(650), IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings)); } - public void testSetRemoteTranslogBufferIntervalFailsWhenRemoteTranslogIsNotEnabled() { + public void testSetRemoteTranslogBufferIntervalFailsWhenEmpty() { Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL, "200ms") + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "") .build(); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(indexSettings) + () -> IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(indexSettings) ); assertEquals( - "Setting 
index.remote_store.translog.buffer_interval can only be set when index.remote_store.translog.enabled is set to true", + "failed to parse setting [index.remote_store.translog.buffer_interval] with value [] as a time value: unit is missing or unrecognized", iae.getMessage() ); } - public void testSetRemoteTranslogBufferIntervalFailsWhenEmpty() { - Settings indexSettings = Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL, "") - .build(); - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(indexSettings) + public void testUpdateRemoteTranslogBufferInterval() { + String bufferInterval = getRandomTimeString(true); + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) + .build() ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); assertEquals( - "failed to parse setting [index.remote_store.translog.buffer_interval] with value [] as a time value: unit is missing or unrecognized", - iae.getMessage() + TimeValue.parseTimeValue( + bufferInterval, + new TimeValue(1, TimeUnit.DAYS), + IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey() + ), + settings.getRemoteTranslogUploadBufferInterval() + ); + String newBufferInterval = getRandomTimeString(true); + settings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder().put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), newBufferInterval).build() + ) + ); + assertEquals( + TimeValue.parseTimeValue( + newBufferInterval, + new TimeValue(1, TimeUnit.DAYS), + IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey() + ), + settings.getRemoteTranslogUploadBufferInterval() ); } diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index bc50525412954..016e785f8da13 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -47,6 +47,8 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; +import org.opensearch.index.codec.customcodecs.Lucene95CustomCodec; +import org.opensearch.index.codec.customcodecs.Lucene95CustomStoredFieldsFormat; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.indices.mapper.MapperRegistry; @@ -63,40 +65,75 @@ public class CodecTests extends OpenSearchTestCase { public void testResolveDefaultCodecs() throws Exception { - CodecService codecService = createCodecService(); + CodecService codecService = createCodecService(false); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); assertThat(codecService.codec("default"), instanceOf(Lucene95Codec.class)); } public void testDefault() throws Exception { - Codec codec = createCodecService().codec("default"); + Codec codec = createCodecService(false).codec("default"); assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); } public void testBestCompression() throws 
Exception { - Codec codec = createCodecService().codec("best_compression"); + Codec codec = createCodecService(false).codec("best_compression"); assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); } + public void testZstd() throws Exception { + Codec codec = createCodecService(false).codec("zstd"); + assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); + } + + public void testZstdNoDict() throws Exception { + Codec codec = createCodecService(false).codec("zstd_no_dict"); + assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); + } + + public void testDefaultMapperServiceNull() throws Exception { + Codec codec = createCodecService(true).codec("default"); + assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); + } + + public void testBestCompressionMapperServiceNull() throws Exception { + Codec codec = createCodecService(true).codec("best_compression"); + assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); + } + + public void testZstdMapperServiceNull() throws Exception { + Codec codec = createCodecService(true).codec("zstd"); + assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); + } + + public void testZstdNoDictMapperServiceNull() throws Exception { + Codec codec = createCodecService(true).codec("zstd_no_dict"); + assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); + } + + public void testExceptionCodecNull() { + assertThrows(IllegalArgumentException.class, () -> createCodecService(true).codec(null)); + } + // write some docs with it, inspect .si to see this was the used compression private void assertStoredFieldsCompressionEquals(Lucene95Codec.Mode expected, Codec actual) throws Exception { - Directory dir = newDirectory(); - IndexWriterConfig iwc = newIndexWriterConfig(null); - iwc.setCodec(actual); - IndexWriter iw = new IndexWriter(dir, iwc); - iw.addDocument(new Document()); - iw.commit(); - iw.close(); - DirectoryReader ir = DirectoryReader.open(dir); - SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader(); + SegmentReader sr = getSegmentReader(actual); String v = sr.getSegmentInfo().info.getAttribute(Lucene90StoredFieldsFormat.MODE_KEY); assertNotNull(v); assertEquals(expected, Lucene95Codec.Mode.valueOf(v)); - ir.close(); - dir.close(); } - private CodecService createCodecService() throws IOException { + private void assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode expected, Codec actual) throws Exception { + SegmentReader sr = getSegmentReader(actual); + String v = sr.getSegmentInfo().info.getAttribute(Lucene95CustomStoredFieldsFormat.MODE_KEY); + assertNotNull(v); + assertEquals(expected, Lucene95CustomCodec.Mode.valueOf(v)); + } + + private CodecService createCodecService(boolean isMapperServiceNull) throws IOException { + + if (isMapperServiceNull) { + return new CodecService(null, LogManager.getLogger("test")); + } Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); IndexSettings settings = IndexSettingsModule.newIndexSettings("_na", nodeSettings); SimilarityService similarityService = new SimilarityService(settings, null, Collections.emptyMap()); @@ -115,4 +152,18 @@ private CodecService createCodecService() throws IOException { return new CodecService(service, LogManager.getLogger("test")); } + private SegmentReader getSegmentReader(Codec codec) throws IOException { + Directory dir = newDirectory(); + 
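// Editor's note, not part of the original patch: this shared helper indexes a single empty
// document with the codec under test, reopens the directory, and returns the sole
// SegmentReader, so callers can read the stored-fields mode back out of the segment
// attributes (Lucene90StoredFieldsFormat.MODE_KEY or Lucene95CustomStoredFieldsFormat.MODE_KEY above).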
IndexWriterConfig iwc = newIndexWriterConfig(null); + iwc.setCodec(codec); + IndexWriter iw = new IndexWriter(dir, iwc); + iw.addDocument(new Document()); + iw.commit(); + iw.close(); + DirectoryReader ir = DirectoryReader.open(dir); + SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader(); + ir.close(); + dir.close(); + return sr; + } } diff --git a/sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java similarity index 100% rename from sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java rename to server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java index fcfb06ca6b050..cc794eb2c48f1 100644 --- a/sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java +++ b/server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java @@ -8,21 +8,21 @@ package org.opensearch.index.codec.customcodecs; -import org.apache.lucene.tests.util.LineFileDocs; -import org.apache.lucene.tests.util.TestUtil; -import org.opensearch.test.OpenSearchTestCase; import org.apache.lucene.codecs.compressing.Compressor; import org.apache.lucene.codecs.compressing.Decompressor; import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.ByteBuffersDataInput; import org.apache.lucene.store.ByteBuffersDataOutput; +import org.apache.lucene.tests.util.LineFileDocs; +import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; +import org.opensearch.test.OpenSearchTestCase; -import java.util.List; -import java.nio.ByteBuffer; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.util.List; import java.util.Random; /** diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java new file mode 100644 index 0000000000000..4f23450ce0b39 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.customcodecs; + +import org.opensearch.test.OpenSearchTestCase; + +public class Lucene95CustomStoredFieldsFormatTests extends OpenSearchTestCase { + + public void testDefaultLucene95CustomCodecMode() { + Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat(); + assertEquals(Lucene95CustomCodec.Mode.ZSTD, lucene95CustomStoredFieldsFormat.getMode()); + } + + public void testZstdNoDictLucene95CustomCodecMode() { + Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat( + Lucene95CustomCodec.Mode.ZSTD_NO_DICT + ); + assertEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, lucene95CustomStoredFieldsFormat.getMode()); + } + +} diff --git a/sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java similarity index 100% rename from sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java rename to server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java diff --git a/sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java similarity index 100% rename from sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java rename to server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java new file mode 100644 index 0000000000000..5b9135afb66f3 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.test.OpenSearchTestCase; + +public class RemoteStoreUtilsTests extends OpenSearchTestCase { + + public void testInvertToStrInvalid() { + assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.invertLong(-1)); + } + + public void testInvertToStrValid() { + assertEquals("9223372036854774573", RemoteStoreUtils.invertLong(1234)); + assertEquals("0000000000000001234", RemoteStoreUtils.invertLong(9223372036854774573L)); + } + + public void testInvertToLongInvalid() { + assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.invertLong("-5")); + } + + public void testInvertToLongValid() { + assertEquals(1234, RemoteStoreUtils.invertLong("9223372036854774573")); + assertEquals(9223372036854774573L, RemoteStoreUtils.invertLong("0000000000000001234")); + } + + public void testInvert() { + assertEquals(0, RemoteStoreUtils.invertLong(RemoteStoreUtils.invertLong(0))); + assertEquals(Long.MAX_VALUE, RemoteStoreUtils.invertLong(RemoteStoreUtils.invertLong(Long.MAX_VALUE))); + for (int i = 0; i < 10; i++) { + long num = randomLongBetween(1, Long.MAX_VALUE); + assertEquals(num, RemoteStoreUtils.invertLong(RemoteStoreUtils.invertLong(num))); + } + } +} diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 1c0ebf17285f7..58527dbea5791 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; @@ -49,6 +50,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; import org.junit.Assert; +import org.opensearch.common.io.PathUtils; import org.opensearch.core.Assertions; import org.opensearch.OpenSearchException; import org.opensearch.Version; @@ -127,6 +129,7 @@ import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreStats; import org.opensearch.index.store.StoreUtils; @@ -188,7 +191,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; - +import java.util.Collection; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -2797,6 +2800,78 @@ public void restoreShard( closeShards(target); } + public void testSyncSegmentsFromGivenRemoteSegmentStore() throws IOException { + String remoteStorePath = createTempDir().toString(); + IndexShard source = newStartedShard( + true, + Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, remoteStorePath + "__test") + .build(), + new InternalEngineFactory() + ); + indexDoc(source, "_doc", "1"); + indexDoc(source, "_doc", "2"); + source.refresh("test"); + assertDocs(source, "1", "2"); + indexDoc(source, "_doc",
"3"); + source.refresh("test"); + flushShard(source); + + indexDoc(source, "_doc", "5"); + source.refresh("test"); + + indexDoc(source, "_doc", "4"); + source.refresh("test"); + + long primaryTerm; + long commitGeneration; + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = source.getSegmentInfosSnapshot()) { + SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + primaryTerm = source.getOperationPrimaryTerm(); + commitGeneration = segmentInfos.getGeneration(); + } + Collection<String> lastCommittedSegmentsInSource = SegmentInfos.readLatestCommit(source.store().directory()).files(false); + + closeShards(source); + + RemoteSegmentStoreDirectory tempRemoteSegmentDirectory = createRemoteSegmentStoreDirectory( + source.shardId(), + PathUtils.get(remoteStorePath) + ); + + IndexShard target = newStartedShard( + true, + Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .build(), + new InternalEngineFactory() + ); + ShardRouting routing = ShardRoutingHelper.initWithSameId( + target.routingEntry(), + RecoverySource.ExistingStoreRecoverySource.INSTANCE + ); + routing = ShardRoutingHelper.newWithRestoreSource(routing, new RecoverySource.EmptyStoreRecoverySource()); + + target = reinitShard(target, routing); + + target.syncSegmentsFromGivenRemoteSegmentStore(false, tempRemoteSegmentDirectory, primaryTerm, commitGeneration); + RemoteSegmentStoreDirectory remoteStoreDirectory = ((RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) target + .remoteStore() + .directory()).getDelegate()).getDelegate()); + Collection<String> uploadFiles = remoteStoreDirectory.getSegmentsUploadedToRemoteStore().keySet(); + assertTrue(uploadFiles.containsAll(lastCommittedSegmentsInSource)); + assertTrue( + "Failed to sync all files to new shard", + List.of(target.store().directory().listAll()).containsAll(lastCommittedSegmentsInSource) + ); + Directory storeDirectory = ((FilterDirectory) ((FilterDirectory) target.store().directory()).getDelegate()).getDelegate(); + ((BaseDirectoryWrapper) storeDirectory).setCheckIndexOnClose(false); + closeShards(target); + } + public void testRefreshLevelRestoreShardFromRemoteStore() throws IOException { testRestoreShardFromRemoteStore(false); } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 1c2ddf43f8274..688f29fa1f4bf 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -282,7 +282,6 @@ public void testRefreshSuccessOnSecondAttempt() throws Exception { /** * Tests retry flow after snapshot and metadata files have been uploaded to remote store in the failed attempt. * Snapshot and metadata files created in failed attempt should not break retry.
- * @throws Exception */ public void testRefreshSuccessAfterFailureInFirstAttemptAfterSnapshotAndMetadataUpload() throws Exception { int succeedOnAttempt = 1; diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java index 7a2f67e206f74..aef90483b4bbd 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -14,6 +14,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.NRTReplicationEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; @@ -36,7 +37,7 @@ public class ReplicaRecoveryWithRemoteTranslogOnPrimaryTests extends OpenSearchI .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, "true") .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "translog-repo") - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL, "100ms") + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "100ms") .build(); public void testStartSequenceForReplicaRecovery() throws Exception { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index 7a9cbc12d823b..324315505987b 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -24,6 +24,7 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.file.Path; @@ -41,14 +42,16 @@ public class RemoteSegmentStoreDirectoryFactoryTests extends OpenSearchTestCase private Supplier repositoriesServiceSupplier; private RepositoriesService repositoriesService; + private ThreadPool threadPool; private RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory; @Before public void setup() { repositoriesServiceSupplier = mock(Supplier.class); repositoriesService = mock(RepositoriesService.class); + threadPool = mock(ThreadPool.class); when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); - remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier); + remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier, threadPool); } public void testNewDirectory() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index fec9b04d6e371..66e4b9a357b85 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -30,12 +30,14 
@@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.file.NoSuchFileException; @@ -45,6 +47,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutorService; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -56,6 +59,7 @@ import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.doReturn; +import static org.hamcrest.CoreMatchers.is; public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { private RemoteDirectory remoteDataDirectory; @@ -65,21 +69,31 @@ public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { private RemoteSegmentStoreDirectory remoteSegmentStoreDirectory; private IndexShard indexShard; private SegmentInfos segmentInfos; + private ThreadPool threadPool; @Before public void setup() throws IOException { remoteDataDirectory = mock(RemoteDirectory.class); remoteMetadataDirectory = mock(RemoteDirectory.class); mdLockManager = mock(RemoteStoreMetadataLockManager.class); + threadPool = mock(ThreadPool.class); - remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(remoteDataDirectory, remoteMetadataDirectory, mdLockManager); + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDataDirectory, + remoteMetadataDirectory, + mdLockManager, + threadPool + ); Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT).build(); + ExecutorService executorService = OpenSearchExecutors.newDirectExecutorService(); indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); try (Store store = indexShard.store()) { segmentInfos = store.readLastCommittedSegmentsInfo(); } + + when(threadPool.executor(ThreadPool.Names.REMOTE_PURGE)).thenReturn(executorService); } @After @@ -228,7 +242,8 @@ private Map<String, String> getDummyMetadata(String prefix, int commitGeneration * @return ByteArrayIndexInput: metadata file bytes with header and footer * @throws IOException IOException */ - private ByteArrayIndexInput createMetadataFileBytes(Map<String, String> segmentFilesMap, long generation) throws IOException { + private ByteArrayIndexInput createMetadataFileBytes(Map<String, String> segmentFilesMap, long generation, long primaryTerm) + throws IOException { ByteBuffersDataOutput byteBuffersIndexOutput = new ByteBuffersDataOutput(); segmentInfos.write(new ByteBuffersIndexOutput(byteBuffersIndexOutput, "", "")); byte[] byteArray = byteBuffersIndexOutput.toArrayCopy(); @@ -238,6 +253,7 @@ private ByteArrayIndexInput createMetadataFileBytes(Map<String, String> segmentF CodecUtil.writeHeader(indexOutput, RemoteSegmentMetadata.METADATA_CODEC, RemoteSegmentMetadata.CURRENT_VERSION); indexOutput.writeMapOfStrings(segmentFilesMap); indexOutput.writeLong(generation); + indexOutput.writeLong(primaryTerm);
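+ // The helper writes the metadata file in the order RemoteSegmentMetadataHandler reads it back:
+ // codec header, segment-file map, generation, primary term (the long added by this change),
+ // SegmentInfos byte length, SegmentInfos bytes, codec footer.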
indexOutput.writeLong(byteArray.length); indexOutput.writeBytes(byteArray, byteArray.length); CodecUtil.writeFooter(indexOutput); @@ -261,14 +277,14 @@ private Map<String, Map<String, String>> populateMetadata() throws IOException { ); when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__5__abc"), 1) + createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__5__abc"), 1, 5) ); when(remoteMetadataDirectory.openInput("metadata__1__6__pqr", IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__6__pqr"), 1) + createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__6__pqr"), 1, 6) ); when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__2__1__zxv"), 1), - createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__2__1__zxv"), 1) + createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__2__1__zxv"), 1, 2), + createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__2__1__zxv"), 1, 2) ); return metadataFilenameContentMapping; @@ -503,7 +519,7 @@ public void testGetSegmentsUploadedToRemoteStore() throws IOException { ); when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__5__abc"), 1) + createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__5__abc"), 1, 5) ); assert (remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore(testPrimaryTerm, testGeneration).containsKey("segments_5")); @@ -577,7 +593,9 @@ public void testContainsFile() throws IOException { metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512"); metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024"); - when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(createMetadataFileBytes(metadata, 1)); + when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn( + createMetadataFileBytes(metadata, 1, 5) + ); remoteSegmentStoreDirectory.init(); @@ -762,41 +780,76 @@ public void testIncorrectChecksumCorruptIndexException() throws IOException { assertThrows(CorruptIndexException.class, () -> remoteSegmentStoreDirectory.init()); } - public void testDeleteStaleCommitsException() throws IOException { + public void testDeleteStaleCommitsException() throws Exception { + populateMetadata(); when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenThrow( new IOException("Error reading") ); - assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.deleteStaleSegments(5)); + // populateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here to validate that in case of exception deleteFile is not + // invoked + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory, times(0)).deleteFile(any(String.class)); + } + + public void testDeleteStaleCommitsExceptionWhileScheduling() throws Exception { + populateMetadata(); + doThrow(new IllegalArgumentException()).when(threadPool).executor(any(String.class)); + + // populateMetadata() adds
stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here to validate that in case of exception deleteFile is not + // invoked + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory, times(0)).deleteFile(any(String.class)); + } + + public void testDeleteStaleCommitsWithDeletionAlreadyInProgress() throws Exception { + populateMetadata(); + remoteSegmentStoreDirectory.canDeleteStaleCommits.set(false); + + // populateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here to validate that in case of exception deleteFile is not + // invoked + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(false))); + verify(remoteMetadataDirectory, times(0)).deleteFile(any(String.class)); } - public void testDeleteStaleCommitsWithinThreshold() throws IOException { + public void testDeleteStaleCommitsWithinThreshold() throws Exception { populateMetadata(); // populateMetadata() adds stub to return 3 metadata files // We are passing lastNMetadataFilesToKeep=5 here so that none of the metadata files will be deleted - remoteSegmentStoreDirectory.deleteStaleSegments(5); + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(5); + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); verify(remoteMetadataDirectory, times(0)).openInput(any(String.class), eq(IOContext.DEFAULT)); } - public void testDeleteStaleCommitsActualDelete() throws IOException { + public void testDeleteStaleCommitsActualDelete() throws Exception { Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata(); remoteSegmentStoreDirectory.init(); // populateMetadata() adds stub to return 3 metadata files // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted - remoteSegmentStoreDirectory.deleteStaleSegments(2); + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) { String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; verify(remoteDataDirectory).deleteFile(uploadedFilename); } ; + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc"); } - public void testDeleteStaleCommitsActualDeleteIOException() throws IOException { + public void testDeleteStaleCommitsActualDeleteIOException() throws Exception { Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -809,17 +862,18 @@ public void testDeleteStaleCommitsActualDeleteIOException() throws IOException { doThrow(new IOException("Error")).when(remoteDataDirectory).deleteFile(segmentFileWithException); // populateMetadata() adds stub to return 3 metadata files // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted - remoteSegmentStoreDirectory.deleteStaleSegments(2); + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) { String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; verify(remoteDataDirectory).deleteFile(uploadedFilename); } ; +
assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); verify(remoteMetadataDirectory, times(0)).deleteFile("metadata__1__5__abc"); } - public void testDeleteStaleCommitsActualDeleteNoSuchFileException() throws IOException { + public void testDeleteStaleCommitsActualDeleteNoSuchFileException() throws Exception { Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -832,13 +886,14 @@ public void testDeleteStaleCommitsActualDeleteNoSuchFileException() throws IOExc doThrow(new NoSuchFileException(segmentFileWithException)).when(remoteDataDirectory).deleteFile(segmentFileWithException); // populateMetadata() adds stub to return 3 metadata files // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted - remoteSegmentStoreDirectory.deleteStaleSegments(2); + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) { String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; verify(remoteDataDirectory).deleteFile(uploadedFilename); } ; + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc"); } diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java index 3924cd4ed1913..5bdba0513af23 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java @@ -48,7 +48,7 @@ public class FileCacheCleanerTests extends OpenSearchTestCase { ); private final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache( - 1024 * 1024 * 1024, + 1024 * 1024, 1, new NoopCircuitBreaker(CircuitBreaker.REQUEST) ); diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java index 43a5c04b59f83..02b6a48b6f48e 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java @@ -31,7 +31,6 @@ public class FileCacheTests extends OpenSearchTestCase { // but fatal to these tests private final static int CONCURRENCY_LEVEL = 16; private final static int MEGA_BYTES = 1024 * 1024; - private final static int GIGA_BYTES = 1024 * 1024 * 1024; private final static String FAKE_PATH_SUFFIX = "Suffix"; private Path path; @@ -66,15 +65,9 @@ private void createFile(String indexName, String shardId, String fileName) throw Files.write(filePath, "test-data".getBytes()); } - public void testCreateCacheWithSmallSegments() { - assertThrows(IllegalStateException.class, () -> { - FileCacheFactory.createConcurrentLRUFileCache(1000, CONCURRENCY_LEVEL, new NoopCircuitBreaker(CircuitBreaker.REQUEST)); - }); - } - // test get method public void testGet() { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(8 * MEGA_BYTES); for (int i = 0; i < 4; i++) { fileCache.put(createPath(Integer.toString(i)), new StubCachedIndexInput(8 * MEGA_BYTES)); } @@ -86,27 +79,27 @@ public void
testGetThrowException() { assertThrows(NullPointerException.class, () -> { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); fileCache.get(null); }); } public void testPutThrowException() { assertThrows(NullPointerException.class, () -> { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); fileCache.put(null, null); }); } public void testPutThrowCircuitBreakingException() { - FileCache fileCache = createCircuitBreakingFileCache(GIGA_BYTES); + FileCache fileCache = createCircuitBreakingFileCache(MEGA_BYTES); Path path = createPath("0"); assertThrows(CircuitBreakingException.class, () -> fileCache.put(path, new StubCachedIndexInput(8 * MEGA_BYTES))); assertNull(fileCache.get(path)); } public void testCompute() { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); Path path = createPath("0"); fileCache.put(path, new StubCachedIndexInput(8 * MEGA_BYTES)); fileCache.incRef(path); @@ -117,20 +110,20 @@ public void testCompute() { public void testComputeThrowException() { assertThrows(NullPointerException.class, () -> { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); fileCache.compute(null, null); }); } public void testComputeThrowCircuitBreakingException() { - FileCache fileCache = createCircuitBreakingFileCache(GIGA_BYTES); + FileCache fileCache = createCircuitBreakingFileCache(MEGA_BYTES); Path path = createPath("0"); assertThrows(CircuitBreakingException.class, () -> fileCache.compute(path, (p, i) -> new StubCachedIndexInput(8 * MEGA_BYTES))); assertNull(fileCache.get(path)); } public void testRemove() { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); for (int i = 0; i < 4; i++) { fileCache.put(createPath(Integer.toString(i)), new StubCachedIndexInput(8 * MEGA_BYTES)); } @@ -145,13 +138,13 @@ public void testRemove() { public void testRemoveThrowException() { assertThrows(NullPointerException.class, () -> { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); fileCache.remove(null); }); } public void testIncDecRef() { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); for (int i = 0; i < 4; i++) { fileCache.put(createPath(Integer.toString(i)), new StubCachedIndexInput(8 * MEGA_BYTES)); } @@ -184,7 +177,7 @@ public void testIncDecRef() { public void testIncRefThrowException() { assertThrows(NullPointerException.class, () -> { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); fileCache.incRef(null); }); @@ -192,19 +185,19 @@ public void testIncRefThrowException() { public void testDecRefThrowException() { assertThrows(NullPointerException.class, () -> { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); fileCache.decRef(null); }); } public void testCapacity() { - FileCache fileCache = createFileCache(GIGA_BYTES); - assertEquals(fileCache.capacity(), GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); + assertEquals(fileCache.capacity(), MEGA_BYTES); } public void testSize() { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); for (int i = 0; i < 4; i++) { fileCache.put(createPath(Integer.toString(i)), new StubCachedIndexInput(8 * 
MEGA_BYTES)); } @@ -213,34 +206,34 @@ public void testSize() { } public void testPrune() { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); for (int i = 0; i < 4; i++) { putAndDecRef(fileCache, i, 8 * MEGA_BYTES); } // before prune - assertEquals(fileCache.size(), 4); + assertTrue(fileCache.size() >= 1); fileCache.prune(); // after prune - assertEquals(fileCache.size(), 0); + assertEquals(0, fileCache.size()); } public void testPruneWithPredicate() { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); for (int i = 0; i < 4; i++) { putAndDecRef(fileCache, i, 8 * MEGA_BYTES); } // before prune - assertEquals(fileCache.size(), 4); + assertTrue(fileCache.size() >= 1); // after prune with false predicate fileCache.prune(path -> false); - assertEquals(fileCache.size(), 4); + assertTrue(fileCache.size() >= 1); // after prune with true predicate fileCache.prune(path -> true); - assertEquals(fileCache.size(), 0); + assertEquals(0, fileCache.size()); } public void testUsage() { @@ -258,7 +251,7 @@ public void testUsage() { } public void testStats() { - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); for (int i = 0; i < 4; i++) { fileCache.put(createPath(Integer.toString(i)), new StubCachedIndexInput(8 * MEGA_BYTES)); } @@ -284,7 +277,7 @@ public void testCacheRestore() throws IOException { String indexName = "test-index"; String shardId = "0"; createFile(indexName, shardId, "test.0"); - FileCache fileCache = createFileCache(GIGA_BYTES); + FileCache fileCache = createFileCache(MEGA_BYTES); assertEquals(0, fileCache.usage().usage()); Path fileCachePath = path.resolve(NodeEnvironment.CACHE_FOLDER).resolve(indexName).resolve(shardId); fileCache.restoreFromDirectory(List.of(fileCachePath)); diff --git a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java index 3bf7781fb909f..cc0764a6700b1 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java @@ -61,6 +61,7 @@ public void testReadContentNoSegmentInfos() throws IOException { Map expectedOutput = getDummyData(); indexOutput.writeMapOfStrings(expectedOutput); indexOutput.writeLong(1234); + indexOutput.writeLong(1234); indexOutput.writeLong(0); indexOutput.writeBytes(new byte[0], 0); indexOutput.close(); @@ -77,6 +78,7 @@ public void testReadContentWithSegmentInfos() throws IOException { Map expectedOutput = getDummyData(); indexOutput.writeMapOfStrings(expectedOutput); indexOutput.writeLong(1234); + indexOutput.writeLong(1234); ByteBuffersIndexOutput segmentInfosOutput = new ByteBuffersIndexOutput(new ByteBuffersDataOutput(), "test", "resource"); segmentInfos.write(segmentInfosOutput); byte[] segmentInfosBytes = segmentInfosOutput.toArrayCopy(); @@ -103,6 +105,7 @@ public void testWriteContent() throws IOException { RemoteSegmentMetadata remoteSegmentMetadata = new RemoteSegmentMetadata( RemoteSegmentMetadata.fromMapOfStrings(expectedOutput), segmentInfosBytes, + 1234, 1234 ); remoteSegmentMetadataHandler.writeContent(indexOutput, remoteSegmentMetadata); @@ -113,6 +116,7 @@ public void testWriteContent() throws IOException { ); assertEquals(expectedOutput, 
metadata.toMapOfStrings()); assertEquals(1234, metadata.getGeneration()); + assertEquals(1234, metadata.getPrimaryTerm()); assertArrayEquals(segmentInfosBytes, metadata.getSegmentInfosBytes()); } diff --git a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java index ff251f42ab21b..96022315743c2 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java @@ -10,6 +10,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.seqno.ReplicationTracker; @@ -23,7 +24,7 @@ public class RemoteStorePeerRecoverySourceHandlerTests extends OpenSearchIndexLe .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, "true") .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "translog-repo") - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_BUFFER_INTERVAL, "100ms") + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "100ms") .build(); public void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { diff --git a/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java b/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java index b299ac4d66996..76301acac0c19 100644 --- a/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java +++ b/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java @@ -33,6 +33,7 @@ package org.opensearch.ingest; import org.opensearch.OpenSearchException; +import org.opensearch.common.metrics.OperationStats; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -433,10 +434,10 @@ private void assertStats(CompoundProcessor compoundProcessor, long count, long f } private void assertStats(int processor, CompoundProcessor compoundProcessor, long current, long count, long failed, long time) { - IngestStats.Stats stats = compoundProcessor.getProcessorsWithMetrics().get(processor).v2().createStats(); - assertThat(stats.getIngestCount(), equalTo(count)); - assertThat(stats.getIngestCurrent(), equalTo(current)); - assertThat(stats.getIngestFailedCount(), equalTo(failed)); - assertThat(stats.getIngestTimeInMillis(), equalTo(time)); + OperationStats stats = compoundProcessor.getProcessorsWithMetrics().get(processor).v2().createStats(); + assertThat(stats.getCount(), equalTo(count)); + assertThat(stats.getCurrent(), equalTo(current)); + assertThat(stats.getFailedCount(), equalTo(failed)); + assertThat(stats.getTotalTimeInMillis(), equalTo(time)); } } diff --git a/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java b/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java index a383ab9b97918..921ac10c02862 100644 --- a/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java +++ b/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java @@ -32,6 +32,7 @@ package org.opensearch.ingest; +import org.opensearch.common.metrics.OperationStats; import 
org.opensearch.common.settings.Settings; import org.opensearch.script.IngestConditionalScript; import org.opensearch.script.MockScriptEngine; @@ -250,10 +251,10 @@ private static void assertMutatingCtxThrows(Consumer> mutati } private static void assertStats(ConditionalProcessor conditionalProcessor, long count, long failed, long time) { - IngestStats.Stats stats = conditionalProcessor.getMetric().createStats(); - assertThat(stats.getIngestCount(), equalTo(count)); - assertThat(stats.getIngestCurrent(), equalTo(0L)); - assertThat(stats.getIngestFailedCount(), equalTo(failed)); - assertThat(stats.getIngestTimeInMillis(), greaterThanOrEqualTo(time)); + OperationStats stats = conditionalProcessor.getMetric().createStats(); + assertThat(stats.getCount(), equalTo(count)); + assertThat(stats.getCurrent(), equalTo(0L)); + assertThat(stats.getFailedCount(), equalTo(failed)); + assertThat(stats.getTotalTimeInMillis(), greaterThanOrEqualTo(time)); } } diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java index 4176a32e32ad3..19fef468c529e 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java @@ -58,6 +58,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.metrics.OperationStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.xcontent.XContentBuilder; @@ -1739,14 +1740,14 @@ private void assertPipelineStats(List pipelineStats, S assertStats(getPipelineStats(pipelineStats, pipelineId), count, failed, time); } - private void assertStats(IngestStats.Stats stats, long count, long failed, long time) { - assertThat(stats.getIngestCount(), equalTo(count)); - assertThat(stats.getIngestCurrent(), equalTo(0L)); - assertThat(stats.getIngestFailedCount(), equalTo(failed)); - assertThat(stats.getIngestTimeInMillis(), greaterThanOrEqualTo(time)); + private void assertStats(OperationStats stats, long count, long failed, long time) { + assertThat(stats.getCount(), equalTo(count)); + assertThat(stats.getCurrent(), equalTo(0L)); + assertThat(stats.getFailedCount(), equalTo(failed)); + assertThat(stats.getTotalTimeInMillis(), greaterThanOrEqualTo(time)); } - private IngestStats.Stats getPipelineStats(List pipelineStats, String id) { + private OperationStats getPipelineStats(List pipelineStats, String id) { return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null); } } diff --git a/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java b/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java index b5c74f0ee5d16..b17e24ee5424d 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java @@ -35,6 +35,7 @@ import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.metrics.OperationStats; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -48,7 +49,7 @@ public class IngestStatsTests extends OpenSearchTestCase { public void testSerialization() throws IOException { - IngestStats.Stats totalStats = 
new IngestStats.Stats(50, 100, 200, 300); + OperationStats totalStats = new OperationStats(50, 100, 200, 300); List<IngestStats.PipelineStat> pipelineStats = createPipelineStats(); Map<String, List<IngestStats.ProcessorStat>> processorStats = createProcessorStats(pipelineStats); IngestStats ingestStats = new IngestStats(totalStats, pipelineStats, processorStats); @@ -57,20 +58,20 @@ public void testSerialization() throws IOException { } private List<IngestStats.PipelineStat> createPipelineStats() { - IngestStats.PipelineStat pipeline1Stats = new IngestStats.PipelineStat("pipeline1", new IngestStats.Stats(3, 3, 3, 3)); - IngestStats.PipelineStat pipeline2Stats = new IngestStats.PipelineStat("pipeline2", new IngestStats.Stats(47, 97, 197, 297)); - IngestStats.PipelineStat pipeline3Stats = new IngestStats.PipelineStat("pipeline3", new IngestStats.Stats(0, 0, 0, 0)); + IngestStats.PipelineStat pipeline1Stats = new IngestStats.PipelineStat("pipeline1", new OperationStats(3, 3, 3, 3)); + IngestStats.PipelineStat pipeline2Stats = new IngestStats.PipelineStat("pipeline2", new OperationStats(47, 97, 197, 297)); + IngestStats.PipelineStat pipeline3Stats = new IngestStats.PipelineStat("pipeline3", new OperationStats(0, 0, 0, 0)); return Stream.of(pipeline1Stats, pipeline2Stats, pipeline3Stats).collect(Collectors.toList()); } private Map<String, List<IngestStats.ProcessorStat>> createProcessorStats(List<IngestStats.PipelineStat> pipelineStats) { assert (pipelineStats.size() >= 2); - IngestStats.ProcessorStat processor1Stat = new IngestStats.ProcessorStat("processor1", "type", new IngestStats.Stats(1, 1, 1, 1)); - IngestStats.ProcessorStat processor2Stat = new IngestStats.ProcessorStat("processor2", "type", new IngestStats.Stats(2, 2, 2, 2)); + IngestStats.ProcessorStat processor1Stat = new IngestStats.ProcessorStat("processor1", "type", new OperationStats(1, 1, 1, 1)); + IngestStats.ProcessorStat processor2Stat = new IngestStats.ProcessorStat("processor2", "type", new OperationStats(2, 2, 2, 2)); IngestStats.ProcessorStat processor3Stat = new IngestStats.ProcessorStat( "processor3", "type", - new IngestStats.Stats(47, 97, 197, 297) + new OperationStats(47, 97, 197, 297) ); // pipeline1 -> processor1,processor2; pipeline2 -> processor3 return MapBuilder.<String, List<IngestStats.ProcessorStat>>newMapBuilder() @@ -132,14 +133,14 @@ private void assertIngestStats( } - private void assertStats(IngestStats.Stats fromObject, IngestStats.Stats fromStream) { - assertEquals(fromObject.getIngestCount(), fromStream.getIngestCount()); - assertEquals(fromObject.getIngestFailedCount(), fromStream.getIngestFailedCount()); - assertEquals(fromObject.getIngestTimeInMillis(), fromStream.getIngestTimeInMillis()); - assertEquals(fromObject.getIngestCurrent(), fromStream.getIngestCurrent()); + private void assertStats(OperationStats fromObject, OperationStats fromStream) { + assertEquals(fromObject.getCount(), fromStream.getCount()); + assertEquals(fromObject.getFailedCount(), fromStream.getFailedCount()); + assertEquals(fromObject.getTotalTimeInMillis(), fromStream.getTotalTimeInMillis()); + assertEquals(fromObject.getCurrent(), fromStream.getCurrent()); } - private IngestStats.Stats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) { + private OperationStats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) { return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null); } } diff --git a/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java b/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java index 9f8dda15eeb65..3708b5bc32955 100644 --- a/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java +++
b/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java @@ -32,6 +32,7 @@ package org.opensearch.ingest; import org.opensearch.OpenSearchException; +import org.opensearch.common.metrics.OperationStats; import org.opensearch.script.ScriptService; import org.opensearch.script.TemplateScript; import org.opensearch.test.OpenSearchTestCase; @@ -192,29 +193,29 @@ public void testPipelineProcessorWithPipelineChain() throws Exception { assertNotNull(ingestDocument.getSourceAndMetadata().get(key1)); // check the stats - IngestStats.Stats pipeline1Stats = pipeline1.getMetrics().createStats(); - IngestStats.Stats pipeline2Stats = pipeline2.getMetrics().createStats(); - IngestStats.Stats pipeline3Stats = pipeline3.getMetrics().createStats(); + OperationStats pipeline1Stats = pipeline1.getMetrics().createStats(); + OperationStats pipeline2Stats = pipeline2.getMetrics().createStats(); + OperationStats pipeline3Stats = pipeline3.getMetrics().createStats(); // current - assertThat(pipeline1Stats.getIngestCurrent(), equalTo(0L)); - assertThat(pipeline2Stats.getIngestCurrent(), equalTo(0L)); - assertThat(pipeline3Stats.getIngestCurrent(), equalTo(0L)); + assertThat(pipeline1Stats.getCurrent(), equalTo(0L)); + assertThat(pipeline2Stats.getCurrent(), equalTo(0L)); + assertThat(pipeline3Stats.getCurrent(), equalTo(0L)); // count - assertThat(pipeline1Stats.getIngestCount(), equalTo(1L)); - assertThat(pipeline2Stats.getIngestCount(), equalTo(1L)); - assertThat(pipeline3Stats.getIngestCount(), equalTo(1L)); + assertThat(pipeline1Stats.getCount(), equalTo(1L)); + assertThat(pipeline2Stats.getCount(), equalTo(1L)); + assertThat(pipeline3Stats.getCount(), equalTo(1L)); // time - assertThat(pipeline1Stats.getIngestTimeInMillis(), equalTo(0L)); - assertThat(pipeline2Stats.getIngestTimeInMillis(), equalTo(3L)); - assertThat(pipeline3Stats.getIngestTimeInMillis(), equalTo(2L)); + assertThat(pipeline1Stats.getTotalTimeInMillis(), equalTo(0L)); + assertThat(pipeline2Stats.getTotalTimeInMillis(), equalTo(3L)); + assertThat(pipeline3Stats.getTotalTimeInMillis(), equalTo(2L)); // failure - assertThat(pipeline1Stats.getIngestFailedCount(), equalTo(0L)); - assertThat(pipeline2Stats.getIngestFailedCount(), equalTo(0L)); - assertThat(pipeline3Stats.getIngestFailedCount(), equalTo(1L)); + assertThat(pipeline1Stats.getFailedCount(), equalTo(0L)); + assertThat(pipeline2Stats.getFailedCount(), equalTo(0L)); + assertThat(pipeline3Stats.getFailedCount(), equalTo(1L)); } public void testIngestPipelineMetadata() { diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 43d371bf5a187..f5295bead19a4 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -58,6 +58,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; @@ -342,6 +343,15 @@ public void restoreShard( } + @Override + public RemoteStoreShardShallowCopySnapshot getRemoteStoreShallowCopyShardMetadata( + SnapshotId snapshotId, + IndexId indexId, + ShardId snapshotShardId + ) { + 
return null; + } + @Override public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) { return null; diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index 8e0ee6b16ed48..105ccef500ce8 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -50,7 +50,12 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.ShardId; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; @@ -117,11 +122,7 @@ protected void assertSnapshotOrGenericThread() { @Override protected Settings nodeSettings() { - return Settings.builder() - .put(super.nodeSettings()) - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .put(FeatureFlags.REMOTE_STORE, "true") - .build(); + return Settings.builder().put(super.nodeSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); } public void testRetrieveSnapshots() throws Exception { @@ -326,12 +327,89 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); - List snapshotIds = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository) - .getSnapshotIds() + RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository); + IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName); + + List snapshotIds = repositoryData.getSnapshotIds() .stream() .sorted((s1, s2) -> s1.getName().compareTo(s2.getName())) .collect(Collectors.toList()); assertThat(snapshotIds, equalTo(originalSnapshots)); + + // shallow copy shard metadata - getRemoteStoreShallowCopyShardMetadata + RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata( + snapshotId2, + indexId, + new ShardId(remoteStoreIndexName, indexId.getId(), 0) + ); + assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName); + } + + public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { + FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + final Client client = client(); + final String snapshotRepositoryName = "test-repo"; + final String remoteStoreRepositoryName = "test-rs-repo"; + + logger.info("--> creating snapshot repository"); + + Settings snapshotRepoSettings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + .build(); + createRepository(client, snapshotRepositoryName, snapshotRepoSettings); + + logger.info("--> creating remote store repository"); + Settings remoteStoreRepoSettings = 
Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + .build(); + createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); + + logger.info("--> creating a remote store enabled index and indexing documents"); + final String remoteStoreIndexName = "test-rs-idx"; + Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); + createIndex(remoteStoreIndexName, indexSettings); + indexDocuments(client, remoteStoreIndexName); + + logger.info("--> create remote index shallow snapshot"); + Settings snapshotRepoSettingsForShallowCopy = Settings.builder() + .put(snapshotRepoSettings) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) + .build(); + updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); + + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2") + .setWaitForCompletion(true) + .setIndices(remoteStoreIndexName) + .get(); + final SnapshotId snapshotId = createSnapshotResponse.getSnapshotInfo().snapshotId(); + + String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); + assert lockFiles[0].endsWith(snapshotId.getUUID() + ".lock"); + + final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); + RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository); + IndexSettings indexSetting = getIndexSettings(remoteStoreIndexName); + IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName); + RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata( + snapshotId, + indexId, + new ShardId(remoteStoreIndexName, indexSetting.getUUID(), 0) + ); + assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName); + assertEquals(shardShallowCopySnapshot.getIndexUUID(), indexSetting.getUUID()); + assertEquals(shardShallowCopySnapshot.getRepositoryBasePath(), ""); + } + + private IndexSettings getIndexSettings(String indexName) { + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexService(resolveIndex(indexName)); + return indexService.getIndexSettings(); } // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 8f8789a3a0323..74ef289c4b75f 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1748,4 +1748,21 @@ public void testCanMatchSearchAfterDescEqualMin() throws IOException { primarySort.order(SortOrder.DESC); assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); } + + /** + * Test canMatchSearchAfter with missing value, even if min/max is out of range + * Min = 0L, Max = 9L, search_after = -1L + * Expected result is canMatch = true + */ + public void testCanMatchSearchAfterWithMissing() 
throws IOException {
+        FieldDoc searchAfter = new FieldDoc(0, 0, new Long[] { -1L });
+        MinAndMax<Long> minMax = new MinAndMax<>(0L, 9L);
+        FieldSortBuilder primarySort = new FieldSortBuilder("test");
+        primarySort.order(SortOrder.DESC);
+        // Should be false without missing values
+        assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), false);
+        primarySort.missing("_last");
+        // Should be true with missing values
+        assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true);
+    }
 }
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java
index 75ad9e12e0776..a2792114e9529 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java
@@ -28,6 +28,10 @@
 import org.opensearch.common.network.InetAddresses;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.time.DateFormatter;
+import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.MockPageCacheRecycler;
+import org.opensearch.index.IndexService;
+import org.opensearch.index.cache.IndexCache;
 import org.opensearch.index.mapper.BooleanFieldMapper;
 import org.opensearch.index.mapper.DateFieldMapper;
 import org.opensearch.index.mapper.GeoPointFieldMapper;
@@ -35,22 +39,32 @@
 import org.opensearch.index.mapper.KeywordFieldMapper;
 import org.opensearch.index.mapper.MappedFieldType;
 import org.opensearch.index.mapper.NumberFieldMapper;
+import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.script.MockScriptEngine;
 import org.opensearch.script.Script;
 import org.opensearch.script.ScriptEngine;
 import org.opensearch.script.ScriptModule;
 import org.opensearch.script.ScriptService;
 import org.opensearch.script.ScriptType;
+import org.opensearch.search.DocValueFormat;
 import org.opensearch.search.aggregations.AggregationBuilder;
+import org.opensearch.search.aggregations.Aggregator;
+import org.opensearch.search.aggregations.AggregatorFactories;
 import org.opensearch.search.aggregations.AggregatorTestCase;
 import org.opensearch.search.aggregations.BucketOrder;
+import org.opensearch.search.aggregations.CardinalityUpperBound;
+import org.opensearch.search.aggregations.InternalAggregation;
 import org.opensearch.search.aggregations.metrics.InternalMax;
 import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder;
 import org.opensearch.search.aggregations.support.CoreValuesSourceType;
 import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig;
 import org.opensearch.search.aggregations.support.ValueType;
 import org.opensearch.search.aggregations.support.ValuesSourceType;
+import org.opensearch.search.internal.SearchContext;
 import org.opensearch.search.lookup.LeafDocLookup;
+import org.opensearch.test.TestSearchContext;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -58,6 +72,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.UUID;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -68,8 +83,12 @@
 import static java.util.stream.Collectors.toList;
 import static org.hamcrest.Matchers.closeTo;
 import static
org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class MultiTermsAggregatorTests extends AggregatorTestCase { private static final String FIELD_NAME = "field"; @@ -852,6 +871,56 @@ public void testIncludeExclude() throws IOException { ); } + public void testEmptyAggregations() throws IOException { + QueryShardContext queryShardContext = mock(QueryShardContext.class); + IndexShard indexShard = mock(IndexShard.class); + BigArrays bigArrays = new BigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService(), ""); + IndexService indexService = mock(IndexService.class); + when(indexService.getShardOrNull(0)).thenReturn(indexShard); + IndexCache cache = mock(IndexCache.class); + when(cache.bitsetFilterCache()).thenReturn(null); + when(indexService.cache()).thenReturn(cache); + SearchContext context = new TestSearchContext(bigArrays, indexService); + when(indexService.newQueryShardContext(0, null, () -> 0L, null)).thenReturn(queryShardContext); + AggregatorFactories factories = AggregatorFactories.EMPTY; + boolean showTermDocCountError = true; + MultiTermsAggregator.InternalValuesSource internalValuesSources = mock(MultiTermsAggregator.InternalValuesSource.class); + DocValueFormat format = mock(DocValueFormat.class); + BucketOrder order = mock(BucketOrder.class); + Aggregator.SubAggCollectionMode collectMode = Aggregator.SubAggCollectionMode.BREADTH_FIRST; + TermsAggregator.BucketCountThresholds bucketCountThresholds = mock(TermsAggregator.BucketCountThresholds.class); + Aggregator parent = mock(Aggregator.class); + CardinalityUpperBound cardinality = CardinalityUpperBound.ONE; + Map metadata = new HashMap<>(); + String k1 = UUID.randomUUID().toString(); + String v1 = UUID.randomUUID().toString(); + metadata.put(k1, v1); + + MultiTermsAggregator mAgg = new MultiTermsAggregator( + AGG_NAME, + factories, + showTermDocCountError, + List.of(internalValuesSources), + List.of(format), + order, + collectMode, + bucketCountThresholds, + context, + parent, + cardinality, + metadata + ); + InternalAggregation emptyAgg = mAgg.buildEmptyAggregation(); + + MatcherAssert.assertThat(emptyAgg.getName(), equalTo(AGG_NAME)); + MatcherAssert.assertThat(emptyAgg, instanceOf(InternalMultiTerms.class)); + + InternalMultiTerms mt = (InternalMultiTerms) emptyAgg; + MatcherAssert.assertThat(mt.getMetadata().keySet(), contains(k1)); + MatcherAssert.assertThat(mt.getMetadata().get(k1), equalTo(v1)); + MatcherAssert.assertThat(mt.getBuckets(), empty()); + } + private void testAggregation( Query query, List terms, diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java index d49d9fd41031c..2ac0b2136ddd9 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java @@ -10,13 +10,22 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; import org.junit.Before; import org.opensearch.OpenSearchParseException; import 
org.opensearch.ResourceNotFoundException; import org.opensearch.Version; import org.opensearch.action.search.DeleteSearchPipelineRequest; +import org.opensearch.action.search.MockSearchPhaseContext; import org.opensearch.action.search.PutSearchPipelineRequest; +import org.opensearch.action.search.QueryPhaseResultConsumer; +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchPhaseController; +import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchPhaseResults; +import org.opensearch.action.search.SearchProgressListener; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchResponseSections; @@ -28,9 +37,14 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.lucene.search.TopDocsAndMaxScore; +import org.opensearch.common.metrics.OperationStats; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexSettings; @@ -39,7 +53,10 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.SearchModule; +import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; @@ -67,6 +84,13 @@ public Map> getRequestProcesso public Map> getResponseProcessors(Processor.Parameters parameters) { return Map.of("bar", (factories, tag, description, config) -> null); } + + @Override + public Map> getSearchPhaseResultsProcessors( + Processor.Parameters parameters + ) { + return Map.of("zoe", (factories, tag, description, config) -> null); + } }; private ThreadPool threadPool; @@ -177,13 +201,13 @@ public void testResolveIndexDefaultPipeline() throws Exception { SearchRequest searchRequest = new SearchRequest("my_index").source(SearchSourceBuilder.searchSource().size(5)); PipelinedRequest pipelinedRequest = service.resolvePipeline(searchRequest); assertEquals("p1", pipelinedRequest.getPipeline().getId()); - assertEquals(10, pipelinedRequest.transformedRequest().source().size()); + assertEquals(10, pipelinedRequest.source().size()); // Bypass the default pipeline searchRequest.pipeline("_none"); pipelinedRequest = service.resolvePipeline(searchRequest); assertEquals("_none", pipelinedRequest.getPipeline().getId()); - assertEquals(5, pipelinedRequest.transformedRequest().source().size()); + assertEquals(5, pipelinedRequest.source().size()); } private static abstract class FakeProcessor implements Processor { @@ -243,6 +267,40 @@ public SearchResponse processResponse(SearchRequest request, SearchResponse resp } } + private static class FakeSearchPhaseResultsProcessor extends FakeProcessor implements SearchPhaseResultsProcessor { + private Consumer 
querySearchResultConsumer; + + public FakeSearchPhaseResultsProcessor( + String type, + String tag, + String description, + Consumer querySearchResultConsumer + ) { + super(type, tag, description); + this.querySearchResultConsumer = querySearchResultConsumer; + } + + @Override + public void process( + SearchPhaseResults searchPhaseResult, + SearchPhaseContext searchPhaseContext + ) { + List resultAtomicArray = searchPhaseResult.getAtomicArray().asList(); + // updating the maxScore + resultAtomicArray.forEach(querySearchResultConsumer); + } + + @Override + public SearchPhaseName getBeforePhase() { + return SearchPhaseName.QUERY; + } + + @Override + public SearchPhaseName getAfterPhase() { + return SearchPhaseName.FETCH; + } + } + private SearchPipelineService createWithProcessors() { Map> requestProcessors = new HashMap<>(); requestProcessors.put("scale_request_size", (processorFactories, tag, description, config) -> { @@ -259,7 +317,15 @@ private SearchPipelineService createWithProcessors() { float score = ((Number) config.remove("score")).floatValue(); return new FakeResponseProcessor("fixed_score", tag, description, rsp -> rsp.getHits().forEach(h -> h.score(score))); }); - return createWithProcessors(requestProcessors, responseProcessors); + + Map> searchPhaseProcessors = new HashMap<>(); + searchPhaseProcessors.put("max_score", (processorFactories, tag, description, config) -> { + final float finalScore = config.containsKey("score") ? ((Number) config.remove("score")).floatValue() : 100f; + final Consumer querySearchResultConsumer = (result) -> result.queryResult().topDocs().maxScore = finalScore; + return new FakeSearchPhaseResultsProcessor("max_score", tag, description, querySearchResultConsumer); + }); + + return createWithProcessors(requestProcessors, responseProcessors, searchPhaseProcessors); } @Override @@ -270,7 +336,8 @@ protected NamedWriteableRegistry writableRegistry() { private SearchPipelineService createWithProcessors( Map> requestProcessors, - Map> responseProcessors + Map> responseProcessors, + Map> phaseProcessors ) { Client client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); @@ -295,6 +362,14 @@ public Map> getRequestProcesso public Map> getResponseProcessors(Processor.Parameters parameters) { return responseProcessors; } + + @Override + public Map> getSearchPhaseResultsProcessors( + Processor.Parameters parameters + ) { + return phaseProcessors; + } + }), client, true @@ -313,7 +388,8 @@ public void testUpdatePipelines() { new BytesArray( "{ " + "\"request_processors\" : [ { \"scale_request_size\": { \"scale\" : 2 } } ], " - + "\"response_processors\" : [ { \"fixed_score\" : { \"score\" : 1.0 } } ]" + + "\"response_processors\" : [ { \"fixed_score\" : { \"score\" : 1.0 } } ]," + + "\"phase_results_processors\" : [ { \"max_score\" : { \"score\": 100 } } ]" + "}" ), XContentType.JSON @@ -331,6 +407,11 @@ public void testUpdatePipelines() { "scale_request_size", searchPipelineService.getPipelines().get("_id").pipeline.getSearchRequestProcessors().get(0).getType() ); + assertEquals(1, searchPipelineService.getPipelines().get("_id").pipeline.getSearchPhaseResultsProcessors().size()); + assertEquals( + "max_score", + searchPipelineService.getPipelines().get("_id").pipeline.getSearchPhaseResultsProcessors().get(0).getType() + ); assertEquals(1, searchPipelineService.getPipelines().get("_id").pipeline.getSearchResponseProcessors().size()); assertEquals( "fixed_score", @@ -368,6 +449,7 @@ public void testPutPipeline() { assertEquals("empty pipeline", 
pipeline.pipeline.getDescription()); assertEquals(0, pipeline.pipeline.getSearchRequestProcessors().size()); assertEquals(0, pipeline.pipeline.getSearchResponseProcessors().size()); + assertEquals(0, pipeline.pipeline.getSearchPhaseResultsProcessors().size()); } public void testPutInvalidPipeline() throws IllegalAccessException { @@ -505,17 +587,14 @@ public void testTransformRequest() throws Exception { SearchRequest request = new SearchRequest("_index").source(sourceBuilder).pipeline("p1"); PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(request); - SearchRequest transformedRequest = pipelinedRequest.transformedRequest(); - assertEquals(2 * size, transformedRequest.source().size()); + assertEquals(2 * size, pipelinedRequest.source().size()); assertEquals(size, request.source().size()); // This request doesn't specify a pipeline, it doesn't get transformed. request = new SearchRequest("_index").source(sourceBuilder); pipelinedRequest = searchPipelineService.resolvePipeline(request); - SearchRequest notTransformedRequest = pipelinedRequest.transformedRequest(); - assertEquals(size, notTransformedRequest.source().size()); - assertSame(request, notTransformedRequest); + assertEquals(size, pipelinedRequest.source().size()); } public void testTransformResponse() throws Exception { @@ -564,6 +643,89 @@ public void testTransformResponse() throws Exception { } } + public void testTransformSearchPhase() { + SearchPipelineService searchPipelineService = createWithProcessors(); + SearchPipelineMetadata metadata = new SearchPipelineMetadata( + Map.of( + "p1", + new PipelineConfiguration( + "p1", + new BytesArray("{\"phase_results_processors\" : [ { \"max_score\" : { } } ]}"), + XContentType.JSON + ) + ) + ); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousState = clusterState; + clusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder().putCustom(SearchPipelineMetadata.TYPE, metadata)) + .build(); + searchPipelineService.applyClusterState(new ClusterChangedEvent("", clusterState, previousState)); + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + s -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + SearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(10); + QueryPhaseResultConsumer searchPhaseResults = new QueryPhaseResultConsumer( + searchPhaseContext.getRequest(), + OpenSearchExecutors.newDirectExecutorService(), + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + SearchProgressListener.NOOP, + writableRegistry(), + 2, + exc -> {} + ); + + final QuerySearchResult querySearchResult = new QuerySearchResult(); + querySearchResult.setShardIndex(1); + querySearchResult.topDocs(new TopDocsAndMaxScore(new TopDocs(null, new ScoreDoc[1]), 1f), null); + searchPhaseResults.consumeResult(querySearchResult, () -> {}); + + // First try without specifying a pipeline, which should be a no-op. 
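+        // With no pipeline id on the request (and no index default), resolvePipeline yields the no-op pipeline,
+        // so the phase-results transform below must leave the query results untouched (checked via assertSame).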
+ SearchRequest searchRequest = new SearchRequest(); + PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); + AtomicArray notTransformedSearchPhaseResults = searchPhaseResults.getAtomicArray(); + pipelinedRequest.transformSearchPhaseResults( + searchPhaseResults, + searchPhaseContext, + SearchPhaseName.QUERY.getName(), + SearchPhaseName.FETCH.getName() + ); + assertSame(searchPhaseResults.getAtomicArray(), notTransformedSearchPhaseResults); + + // Now set the pipeline as p1 + searchRequest = new SearchRequest().pipeline("p1"); + pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); + + pipelinedRequest.transformSearchPhaseResults( + searchPhaseResults, + searchPhaseContext, + SearchPhaseName.QUERY.getName(), + SearchPhaseName.FETCH.getName() + ); + + List resultAtomicArray = searchPhaseResults.getAtomicArray().asList(); + assertEquals(1, resultAtomicArray.size()); + // updating the maxScore + for (SearchPhaseResult result : resultAtomicArray) { + assertEquals(100f, result.queryResult().topDocs().maxScore, 0); + } + + // Check Processor doesn't run for between other phases + searchRequest = new SearchRequest().pipeline("p1"); + pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); + AtomicArray notTransformedSearchPhaseResult = searchPhaseResults.getAtomicArray(); + pipelinedRequest.transformSearchPhaseResults( + searchPhaseResults, + searchPhaseContext, + SearchPhaseName.DFS_QUERY.getName(), + SearchPhaseName.QUERY.getName() + ); + + assertSame(searchPhaseResults.getAtomicArray(), notTransformedSearchPhaseResult); + } + public void testGetPipelines() { // assertEquals(0, SearchPipelineService.innerGetPipelines(null, "p1").size()); @@ -581,16 +743,23 @@ public void testGetPipelines() { "p2", new BytesArray("{\"response_processors\" : [ { \"fixed_score\": { \"score\" : 2 } } ] }"), XContentType.JSON + ), + "p3", + new PipelineConfiguration( + "p3", + new BytesArray("{\"phase_results_processors\" : [ { \"max_score\" : { } } ]}"), + XContentType.JSON ) ) ); // Return all when no ids specified List pipelines = SearchPipelineService.innerGetPipelines(metadata); - assertEquals(2, pipelines.size()); + assertEquals(3, pipelines.size()); pipelines.sort(Comparator.comparing(PipelineConfiguration::getId)); assertEquals("p1", pipelines.get(0).getId()); assertEquals("p2", pipelines.get(1).getId()); + assertEquals("p3", pipelines.get(2).getId()); // Get specific pipeline pipelines = SearchPipelineService.innerGetPipelines(metadata, "p1"); @@ -606,17 +775,19 @@ public void testGetPipelines() { // Match all pipelines = SearchPipelineService.innerGetPipelines(metadata, "*"); - assertEquals(2, pipelines.size()); + assertEquals(3, pipelines.size()); pipelines.sort(Comparator.comparing(PipelineConfiguration::getId)); assertEquals("p1", pipelines.get(0).getId()); assertEquals("p2", pipelines.get(1).getId()); + assertEquals("p3", pipelines.get(2).getId()); // Match prefix pipelines = SearchPipelineService.innerGetPipelines(metadata, "p*"); - assertEquals(2, pipelines.size()); + assertEquals(3, pipelines.size()); pipelines.sort(Comparator.comparing(PipelineConfiguration::getId)); assertEquals("p1", pipelines.get(0).getId()); assertEquals("p2", pipelines.get(1).getId()); + assertEquals("p3", pipelines.get(2).getId()); } public void testValidatePipeline() throws Exception { @@ -624,6 +795,7 @@ public void testValidatePipeline() throws Exception { ProcessorInfo reqProcessor = new ProcessorInfo("scale_request_size"); ProcessorInfo rspProcessor 
= new ProcessorInfo("fixed_score"); + ProcessorInfo injProcessor = new ProcessorInfo("max_score"); DiscoveryNode n1 = new DiscoveryNode("n1", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode n2 = new DiscoveryNode("n2", buildNewFakeTransportAddress(), Version.CURRENT); PutSearchPipelineRequest putRequest = new PutSearchPipelineRequest( @@ -631,7 +803,8 @@ public void testValidatePipeline() throws Exception { new BytesArray( "{" + "\"request_processors\": [{ \"scale_request_size\": { \"scale\" : 2 } }]," - + "\"response_processors\": [{ \"fixed_score\": { \"score\" : 2 } }]" + + "\"response_processors\": [{ \"fixed_score\": { \"score\" : 2 } }]," + + "\"phase_results_processors\" : [ { \"max_score\" : { } } ]" + "}" ), XContentType.JSON @@ -698,8 +871,7 @@ public void testInlinePipeline() throws Exception { assertEquals(1, pipeline.getSearchResponseProcessors().size()); // Verify that pipeline transforms request - SearchRequest transformedRequest = pipelinedRequest.transformedRequest(); - assertEquals(200, transformedRequest.source().size()); + assertEquals(200, pipelinedRequest.source().size()); int size = 10; SearchHit[] hits = new SearchHit[size]; @@ -729,7 +901,7 @@ public void testExceptionOnPipelineCreation() { "bad_factory", (pf, t, f, c) -> { throw new RuntimeException(); } ); - SearchPipelineService searchPipelineService = createWithProcessors(badFactory, Collections.emptyMap()); + SearchPipelineService searchPipelineService = createWithProcessors(badFactory, Collections.emptyMap(), Collections.emptyMap()); Map pipelineSourceMap = new HashMap<>(); pipelineSourceMap.put(Pipeline.REQUEST_PROCESSORS_KEY, List.of(Map.of("bad_factory", Collections.emptyMap()))); @@ -751,7 +923,11 @@ public void testExceptionOnRequestProcessing() { (pf, t, f, c) -> throwingRequestProcessor ); - SearchPipelineService searchPipelineService = createWithProcessors(throwingRequestProcessorFactory, Collections.emptyMap()); + SearchPipelineService searchPipelineService = createWithProcessors( + throwingRequestProcessorFactory, + Collections.emptyMap(), + Collections.emptyMap() + ); Map pipelineSourceMap = new HashMap<>(); pipelineSourceMap.put(Pipeline.REQUEST_PROCESSORS_KEY, List.of(Map.of("throwing_request", Collections.emptyMap()))); @@ -772,7 +948,11 @@ public void testExceptionOnResponseProcessing() throws Exception { (pf, t, f, c) -> throwingResponseProcessor ); - SearchPipelineService searchPipelineService = createWithProcessors(Collections.emptyMap(), throwingResponseProcessorFactory); + SearchPipelineService searchPipelineService = createWithProcessors( + Collections.emptyMap(), + throwingResponseProcessorFactory, + Collections.emptyMap() + ); Map pipelineSourceMap = new HashMap<>(); pipelineSourceMap.put(Pipeline.RESPONSE_PROCESSORS_KEY, List.of(Map.of("throwing_response", Collections.emptyMap()))); @@ -786,4 +966,126 @@ public void testExceptionOnResponseProcessing() throws Exception { // Exception thrown when processing response expectThrows(SearchPipelineProcessingException.class, () -> pipelinedRequest.transformResponse(response)); } + + public void testStats() throws Exception { + SearchRequestProcessor throwingRequestProcessor = new FakeRequestProcessor("throwing_request", "1", null, r -> { + throw new RuntimeException(); + }); + Map> requestProcessors = Map.of( + "successful_request", + (pf, t, f, c) -> new FakeRequestProcessor("successful_request", "2", null, r -> {}), + "throwing_request", + (pf, t, f, c) -> throwingRequestProcessor + ); + SearchResponseProcessor 
throwingResponseProcessor = new FakeResponseProcessor("throwing_response", "3", null, r -> { + throw new RuntimeException(); + }); + Map> responseProcessors = Map.of( + "successful_response", + (pf, t, f, c) -> new FakeResponseProcessor("successful_response", "4", null, r -> {}), + "throwing_response", + (pf, t, f, c) -> throwingResponseProcessor + ); + SearchPipelineService searchPipelineService = createWithProcessors(requestProcessors, responseProcessors, Collections.emptyMap()); + + SearchPipelineMetadata metadata = new SearchPipelineMetadata( + Map.of( + "good_response_pipeline", + new PipelineConfiguration( + "good_response_pipeline", + new BytesArray("{\"response_processors\" : [ { \"successful_response\": {} } ] }"), + XContentType.JSON + ), + "bad_response_pipeline", + new PipelineConfiguration( + "bad_response_pipeline", + new BytesArray("{\"response_processors\" : [ { \"throwing_response\": {} } ] }"), + XContentType.JSON + ), + "good_request_pipeline", + new PipelineConfiguration( + "good_request_pipeline", + new BytesArray("{\"request_processors\" : [ { \"successful_request\": {} } ] }"), + XContentType.JSON + ), + "bad_request_pipeline", + new PipelineConfiguration( + "bad_request_pipeline", + new BytesArray("{\"request_processors\" : [ { \"throwing_request\": {} } ] }"), + XContentType.JSON + ) + ) + ); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousState = clusterState; + clusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder().putCustom(SearchPipelineMetadata.TYPE, metadata)) + .build(); + searchPipelineService.applyClusterState(new ClusterChangedEvent("", clusterState, previousState)); + + SearchRequest request = new SearchRequest(); + SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); + + searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")).transformResponse(response); + expectThrows( + SearchPipelineProcessingException.class, + () -> searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")).transformResponse(response) + ); + searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")).transformResponse(response); + expectThrows( + SearchPipelineProcessingException.class, + () -> searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")).transformResponse(response) + ); + + SearchPipelineStats stats = searchPipelineService.stats(); + assertPipelineStats(stats.getTotalRequestStats(), 2, 1); + assertPipelineStats(stats.getTotalResponseStats(), 2, 1); + for (SearchPipelineStats.PerPipelineStats perPipelineStats : stats.getPipelineStats()) { + SearchPipelineStats.PipelineDetailStats detailStats = stats.getPerPipelineProcessorStats() + .get(perPipelineStats.getPipelineId()); + switch (perPipelineStats.getPipelineId()) { + case "good_request_pipeline": + assertPipelineStats(perPipelineStats.getRequestStats(), 1, 0); + assertPipelineStats(perPipelineStats.getResponseStats(), 0, 0); + assertEquals(1, detailStats.requestProcessorStats().size()); + assertEquals(0, detailStats.responseProcessorStats().size()); + assertEquals("successful_request:2", detailStats.requestProcessorStats().get(0).getProcessorName()); + assertEquals("successful_request", detailStats.requestProcessorStats().get(0).getProcessorType()); + assertPipelineStats(detailStats.requestProcessorStats().get(0).getStats(), 1, 0); + break; + case "bad_request_pipeline": + 
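+                    // The single throwing request processor ran once and failed once, so both the
+                    // pipeline-level and per-processor request stats should report count 1 / failed 1.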
assertPipelineStats(perPipelineStats.getRequestStats(), 1, 1); + assertPipelineStats(perPipelineStats.getResponseStats(), 0, 0); + assertEquals(1, detailStats.requestProcessorStats().size()); + assertEquals(0, detailStats.responseProcessorStats().size()); + assertEquals("throwing_request:1", detailStats.requestProcessorStats().get(0).getProcessorName()); + assertEquals("throwing_request", detailStats.requestProcessorStats().get(0).getProcessorType()); + assertPipelineStats(detailStats.requestProcessorStats().get(0).getStats(), 1, 1); + break; + case "good_response_pipeline": + assertPipelineStats(perPipelineStats.getRequestStats(), 0, 0); + assertPipelineStats(perPipelineStats.getResponseStats(), 1, 0); + assertEquals(0, detailStats.requestProcessorStats().size()); + assertEquals(1, detailStats.responseProcessorStats().size()); + assertEquals("successful_response:4", detailStats.responseProcessorStats().get(0).getProcessorName()); + assertEquals("successful_response", detailStats.responseProcessorStats().get(0).getProcessorType()); + assertPipelineStats(detailStats.responseProcessorStats().get(0).getStats(), 1, 0); + break; + case "bad_response_pipeline": + assertPipelineStats(perPipelineStats.getRequestStats(), 0, 0); + assertPipelineStats(perPipelineStats.getResponseStats(), 1, 1); + assertEquals(0, detailStats.requestProcessorStats().size()); + assertEquals(1, detailStats.responseProcessorStats().size()); + assertEquals("throwing_response:3", detailStats.responseProcessorStats().get(0).getProcessorName()); + assertEquals("throwing_response", detailStats.responseProcessorStats().get(0).getProcessorType()); + assertPipelineStats(detailStats.responseProcessorStats().get(0).getStats(), 1, 1); + break; + } + } + } + + private static void assertPipelineStats(OperationStats stats, long count, long failed) { + assertEquals(stats.getCount(), count); + assertEquals(stats.getFailedCount(), failed); + } } diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineStatsTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineStatsTests.java new file mode 100644 index 0000000000000..dac41f0db4e00 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineStatsTests.java @@ -0,0 +1,185 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pipeline; + +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.metrics.OperationStats; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class SearchPipelineStatsTests extends OpenSearchTestCase { + public void testSerializationRoundtrip() throws IOException { + SearchPipelineStats stats = createStats(); + SearchPipelineStats deserialized; + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + stats.writeTo(bytesStreamOutput); + try (StreamInput bytesStreamInput = bytesStreamOutput.bytes().streamInput()) { + deserialized = new SearchPipelineStats(bytesStreamInput); + } + } + assertEquals(stats, deserialized); + } + + private static SearchPipelineStats createStats() { + return new SearchPipelineStats( + new OperationStats(1, 2, 3, 4), + new OperationStats(5, 6, 7, 8), + List.of( + new SearchPipelineStats.PerPipelineStats("p1", new OperationStats(9, 10, 11, 12), new OperationStats(13, 14, 15, 16)), + new SearchPipelineStats.PerPipelineStats("p2", new OperationStats(17, 18, 19, 20), new OperationStats(21, 22, 23, 24)) + + ), + Map.of( + "p1", + new SearchPipelineStats.PipelineDetailStats( + List.of(new SearchPipelineStats.ProcessorStats("req1:a", "req1", new OperationStats(25, 26, 27, 28))), + List.of(new SearchPipelineStats.ProcessorStats("rsp1:a", "rsp1", new OperationStats(29, 30, 31, 32))) + ), + "p2", + new SearchPipelineStats.PipelineDetailStats( + List.of( + new SearchPipelineStats.ProcessorStats("req1:a", "req1", new OperationStats(33, 34, 35, 36)), + new SearchPipelineStats.ProcessorStats("req2", "req2", new OperationStats(37, 38, 39, 40)) + ), + List.of() + ) + ) + ); + } + + public void testToXContent() throws IOException { + XContentBuilder actualBuilder = XContentBuilder.builder(JsonXContent.jsonXContent); + actualBuilder.startObject(); + createStats().toXContent(actualBuilder, null); + actualBuilder.endObject(); + + String expected = "{" + + " \"search_pipeline\" : {" + + " \"total_request\" : {" + + " \"count\" : 1," + + " \"time_in_millis\" : 2," + + " \"current\" : 3," + + " \"failed\" : 4" + + " }," + + " \"total_response\" : {" + + " \"count\" : 5," + + " \"time_in_millis\" : 6," + + " \"current\" : 7," + + " \"failed\" : 8" + + " }," + + " \"pipelines\" : {" + + " \"p1\" : {" + + " \"request\" : {" + + " \"count\" : 9," + + " \"time_in_millis\" : 10," + + " \"current\" : 11," + + " \"failed\" : 12" + + " }," + + " \"response\" : {" + + " \"count\" : 13," + + " \"time_in_millis\" : 14," + + " \"current\" : 15," + + " \"failed\" : 16" + + " }," + + " \"request_processors\" : [" + + " {" + + " \"req1:a\" : {" + + " \"type\" : \"req1\"," + + " \"stats\" : {" + + " \"count\" : 25," + + " \"time_in_millis\" : 26," + + " \"current\" : 27," + + " \"failed\" : 28" + + " }" + + " }" + + " }" + + " ]," + + " \"response_processors\" : [" + + " {" + + " \"rsp1:a\" : {" + + " \"type\" : \"rsp1\"," + + " \"stats\" : {" + + " \"count\" : 
29," + + " \"time_in_millis\" : 30," + + " \"current\" : 31," + + " \"failed\" : 32" + + " }" + + " }" + + " }" + + " ]" + + " }," + + " \"p2\" : {" + + " \"request\" : {" + + " \"count\" : 17," + + " \"time_in_millis\" : 18," + + " \"current\" : 19," + + " \"failed\" : 20" + + " }," + + " \"response\" : {" + + " \"count\" : 21," + + " \"time_in_millis\" : 22," + + " \"current\" : 23," + + " \"failed\" : 24" + + " }," + + " \"request_processors\" : [" + + " {" + + " \"req1:a\" : {" + + " \"type\" : \"req1\"," + + " \"stats\" : {" + + " \"count\" : 33," + + " \"time_in_millis\" : 34," + + " \"current\" : 35," + + " \"failed\" : 36" + + " }" + + " }" + + " }," + + " {" + + " \"req2\" : {" + + " \"type\" : \"req2\"," + + " \"stats\" : {" + + " \"count\" : 37," + + " \"time_in_millis\" : 38," + + " \"current\" : 39," + + " \"failed\" : 40" + + " }" + + " }" + + " }" + + " ]," + + " \"response_processors\" : [ ]" + + " }" + + " }" + + " }" + + "}"; + + XContentParser expectedParser = JsonXContent.jsonXContent.createParser( + this.xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + expected + ); + XContentBuilder expectedBuilder = XContentBuilder.builder(JsonXContent.jsonXContent); + expectedBuilder.generator().copyCurrentStructure(expectedParser); + + assertEquals( + XContentHelper.convertToMap(BytesReference.bytes(expectedBuilder), false, (MediaType) XContentType.JSON), + XContentHelper.convertToMap(BytesReference.bytes(actualBuilder), false, (MediaType) XContentType.JSON) + ); + } +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 0bb2b604e8f1a..88899a1b282af 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -1835,7 +1835,7 @@ public void onFailure(final Exception e) { emptyMap(), null, emptyMap(), - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService), + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), repositoriesServiceReference::get, fileCacheCleaner ); diff --git a/server/src/test/java/org/opensearch/telemetry/TelemetryModuleTests.java b/server/src/test/java/org/opensearch/telemetry/TelemetryModuleTests.java new file mode 100644 index 0000000000000..45344ab4253f7 --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/TelemetryModuleTests.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.telemetry;
+
+import org.opensearch.plugins.TelemetryPlugin;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.List;
+import java.util.Optional;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TelemetryModuleTests extends OpenSearchTestCase {
+
+    public void testGetTelemetrySuccess() {
+        TelemetryPlugin telemetryPlugin = mock(TelemetryPlugin.class);
+        when(telemetryPlugin.getName()).thenReturn("otel");
+        Telemetry mockTelemetry = mock(Telemetry.class);
+        when(telemetryPlugin.getTelemetry(any())).thenReturn(Optional.of(mockTelemetry));
+        List<TelemetryPlugin> telemetryPlugins = List.of(telemetryPlugin);
+
+        TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, any());
+
+        assertTrue(telemetryModule.getTelemetry().isPresent());
+        assertEquals(mockTelemetry, telemetryModule.getTelemetry().get());
+    }
+
+    public void testGetTelemetryWithMultipleInstalledPlugins() {
+        TelemetryPlugin telemetryPlugin1 = mock(TelemetryPlugin.class);
+        TelemetryPlugin telemetryPlugin2 = mock(TelemetryPlugin.class);
+        when(telemetryPlugin1.getName()).thenReturn("otel");
+        Telemetry mockTelemetry1 = mock(Telemetry.class);
+        Telemetry mockTelemetry2 = mock(Telemetry.class);
+
+        when(telemetryPlugin1.getTelemetry(any())).thenReturn(Optional.of(mockTelemetry1));
+        when(telemetryPlugin2.getTelemetry(any())).thenReturn(Optional.of(mockTelemetry2));
+
+        List<TelemetryPlugin> telemetryPlugins = List.of(telemetryPlugin1, telemetryPlugin2);
+
+        try {
+            TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, any());
+        } catch (Exception e) {
+            assertEquals("Cannot register more than one telemetry", e.getMessage());
+        }
+
+    }
+
+    public void testGetTelemetryWithNoPlugins() {
+        TelemetryPlugin telemetryPlugin = mock(TelemetryPlugin.class);
+        when(telemetryPlugin.getName()).thenReturn("otel");
+        TelemetryModule telemetryModule = new TelemetryModule(List.of(telemetryPlugin), any());
+
+        assertFalse(telemetryModule.getTelemetry().isPresent());
+
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java
new file mode 100644
index 0000000000000..df9cdd6669d23
--- /dev/null
+++ b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.telemetry.tracing; + +import org.junit.After; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.telemetry.tracing.noop.NoopTracer; + +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TracerFactoryTests extends OpenSearchTestCase { + + private TracerFactory tracerFactory; + + @After + public void close() { + tracerFactory.close(); + } + + public void testGetTracerWithTracingDisabledReturnsNoopTracer() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(mock(TracingTelemetry.class)); + tracerFactory = new TracerFactory(telemetrySettings, Optional.of(mockTelemetry), new ThreadContext(Settings.EMPTY)); + + Tracer tracer = tracerFactory.getTracer(); + assertTrue(tracer instanceof NoopTracer); + assertTrue(tracer.startSpan("foo") == SpanScope.NO_OP); + } + + public void testGetTracerWithTracingEnabledReturnsDefaultTracer() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(mock(TracingTelemetry.class)); + tracerFactory = new TracerFactory(telemetrySettings, Optional.of(mockTelemetry), new ThreadContext(Settings.EMPTY)); + + Tracer tracer = tracerFactory.getTracer(); + assertTrue(tracer instanceof DefaultTracer); + + } + + private Set> getClusterSettings() { + Set> allTracerSettings = new HashSet<>(); + ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); + return allTracerSettings; + } +} diff --git a/settings.gradle b/settings.gradle index bf899d04c1e08..d908e941b3d6b 100644 --- a/settings.gradle +++ b/settings.gradle @@ -87,7 +87,8 @@ List projects = [ 'test:fixtures:minio-fixture', 'test:fixtures:old-elasticsearch', 'test:fixtures:s3-fixture', - 'test:logger-usage' + 'test:logger-usage', + 'test:telemetry' ] /** diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 70b84a405c9c6..b1a87fe6c3112 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -37,13 +37,14 @@ versions << [ ] dependencies { - api("org.apache.hadoop:hadoop-minicluster:3.3.5") { + api("org.apache.hadoop:hadoop-minicluster:3.3.6") { exclude module: 'websocket-client' exclude module: 'jettison' exclude module: 'netty' exclude module: 'guava' exclude module: 'protobuf-java' exclude group: 'org.codehaus.jackson' + exclude group: "org.bouncycastle" } api "org.codehaus.jettison:jettison:${versions.jettison}" api 
"org.apache.commons:commons-compress:1.23.0" @@ -51,7 +52,6 @@ dependencies { api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" api 'com.google.code.gson:gson:2.10.1' - api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" @@ -65,4 +65,5 @@ dependencies { api "org.apache.commons:commons-text:1.10.0" api "commons-net:commons-net:3.9.0" runtimeOnly "com.google.guava:guava:${versions.guava}" + } diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 255f554db7a79..2532fdf1938fd 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -38,6 +38,7 @@ dependencies { api project(':libs:opensearch-nio') api project(":server") api project(":libs:opensearch-cli") + api project(":test:telemetry") api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" api "junit:junit:${versions.junit}" api "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index cf5f6613c3ea1..6634d1b4dbafc 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -119,7 +119,8 @@ List adjustNodesStats(List nodesStats) { nodeStats.getClusterManagerThrottlingStats(), nodeStats.getWeightedRoutingStats(), nodeStats.getFileCacheStats(), - nodeStats.getTaskCancellationStats() + nodeStats.getTaskCancellationStats(), + nodeStats.getSearchPipelineStats() ); }).collect(Collectors.toList()); } diff --git a/test/framework/src/main/java/org/opensearch/common/bytes/AbstractBytesReferenceTestCase.java b/test/framework/src/main/java/org/opensearch/common/bytes/AbstractBytesReferenceTestCase.java index dca46c37ca7d2..dd71711f9154c 100644 --- a/test/framework/src/main/java/org/opensearch/common/bytes/AbstractBytesReferenceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/common/bytes/AbstractBytesReferenceTestCase.java @@ -478,6 +478,45 @@ public void testToUtf8() throws IOException { // TODO: good way to test? 
} + public void testUTF8toString_ExceedsMaxLength() { + AbstractBytesReference abr = new TestAbstractBytesReference(); + IllegalArgumentException e = assertThrows(IllegalArgumentException.class, abr::utf8ToString); + assertTrue(e.getMessage().contains("UTF16 String size is")); + assertTrue(e.getMessage().contains("should be less than")); + } + + static class TestAbstractBytesReference extends AbstractBytesReference { + @Override + public byte get(int index) { + return 0; + } + + @Override + public int length() { + return 0; + } + + @Override + public BytesReference slice(int from, int length) { + return null; + } + + @Override + public long ramBytesUsed() { + return 0; + } + + @Override + public BytesRef toBytesRef() { + return new BytesRef("UTF16 length exceed test"); + } + + @Override + public int getMaxUTF16Length() { + return 1; + } + } + public void testToBytesRef() throws IOException { int length = randomIntBetween(0, PAGE_SIZE); BytesReference pbr = newBytesReference(length); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 659f473403ec8..7f3819563dcbd 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -67,6 +67,7 @@ import org.opensearch.common.blobstore.fs.FsBlobStore; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.io.PathUtils; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -101,6 +102,9 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; +import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.Translog; @@ -574,7 +578,14 @@ protected IndexShard newShard( RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService = null; if (indexSettings.isRemoteStoreEnabled()) { if (remoteStore == null) { - remoteStore = createRemoteStore(createTempDir(), routing, indexMetadata); + Path remoteStorePath; + String remoteStoreRepository = indexSettings.getRemoteStoreRepository(); + if (remoteStoreRepository != null && remoteStoreRepository.endsWith("__test")) { + remoteStorePath = PathUtils.get(remoteStoreRepository.replace("__test", "")); + } else { + remoteStorePath = createTempDir(); + } + remoteStore = createRemoteStore(remoteStorePath, routing, indexMetadata); } remoteRefreshSegmentPressureService = new RemoteRefreshSegmentPressureService(clusterService, indexSettings.getSettings()); } @@ -642,21 +653,30 @@ protected RepositoriesService createRepositoriesService() { protected Store createRemoteStore(Path path, ShardRouting shardRouting, IndexMetadata metadata) throws IOException { Settings nodeSettings = Settings.builder().put("node.name", shardRouting.currentNodeId()).build(); + ShardId shardId = shardRouting.shardId(); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = 
createRemoteSegmentStoreDirectory(shardId, path); + return createStore(shardId, new IndexSettings(metadata, nodeSettings), remoteSegmentStoreDirectory); + } - ShardId shardId = new ShardId("index", "_na_", 0); + protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId shardId, Path path) throws IOException { NodeEnvironment.NodePath remoteNodePath = new NodeEnvironment.NodePath(path); ShardPath remoteShardPath = new ShardPath(false, remoteNodePath.resolve(shardId), remoteNodePath.resolve(shardId), shardId); RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, null); - return createStore(shardId, new IndexSettings(metadata, nodeSettings), remoteSegmentStoreDirectory); + RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager( + new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex())) + ); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool); } private RemoteDirectory newRemoteDirectory(Path f) throws IOException { + return new RemoteDirectory(getBlobContainer(f)); + } + + protected BlobContainer getBlobContainer(Path f) throws IOException { FsBlobStore fsBlobStore = new FsBlobStore(1024, f, false); BlobPath blobPath = new BlobPath(); - BlobContainer fsBlobContainer = new FsBlobContainer(fsBlobStore, blobPath, f); - return new RemoteDirectory(fsBlobContainer); + return new FsBlobContainer(fsBlobStore, blobPath, f); } /** diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java index 28660ba834a65..ad515f2405f1d 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java @@ -240,15 +240,18 @@ private static void assertSnapshotUUIDs(BlobStoreRepository repository, Reposito final BlobContainer repoRoot = repository.blobContainer(); final Collection snapshotIds = repositoryData.getSnapshotIds(); final List expectedSnapshotUUIDs = snapshotIds.stream().map(SnapshotId::getUUID).collect(Collectors.toList()); - for (String prefix : new String[] { BlobStoreRepository.SNAPSHOT_PREFIX, BlobStoreRepository.METADATA_PREFIX }) { - final Collection foundSnapshotUUIDs = repoRoot.listBlobs() - .keySet() - .stream() - .filter(p -> p.startsWith(prefix)) - .map(p -> p.replace(prefix, "").replace(".dat", "")) - .collect(Collectors.toSet()); - assertThat(foundSnapshotUUIDs, containsInAnyOrder(expectedSnapshotUUIDs.toArray(Strings.EMPTY_ARRAY))); + Collection foundSnapshotUUIDs = new HashSet<>(); + for (String prefix : new String[] { BlobStoreRepository.SNAPSHOT_PREFIX, BlobStoreRepository.SHALLOW_SNAPSHOT_PREFIX }) { + foundSnapshotUUIDs.addAll( + repoRoot.listBlobs() + .keySet() + .stream() + .filter(p -> p.startsWith(prefix)) + .map(p -> p.replace(prefix, "").replace(".dat", "")) + .collect(Collectors.toSet()) + ); } + assertThat(foundSnapshotUUIDs, containsInAnyOrder(expectedSnapshotUUIDs.toArray(Strings.EMPTY_ARRAY))); final BlobContainer indicesContainer = repository.getBlobContainer().children().get("indices"); final Map indices; @@ -303,10 +306,16 @@ private 
static void assertSnapshotUUIDs(BlobStoreRepository repository, Reposito .stream() .noneMatch(shardFailure -> shardFailure.index().equals(index) && shardFailure.shardId() == shardId)) { final Map shardPathContents = shardContainer.listBlobs(); - assertThat( - shardPathContents, - hasKey(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID())) + + assertTrue( + shardPathContents.containsKey( + String.format(Locale.ROOT, BlobStoreRepository.SHALLOW_SNAPSHOT_NAME_FORMAT, snapshotId.getUUID()) + ) + || shardPathContents.containsKey( + String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID()) + ) ); + assertThat( shardPathContents.keySet() .stream() diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 9933297aa1c96..ddf9f3e96b9b4 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -105,6 +105,7 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestCase { + protected final static String TEST_REMOTE_STORE_REPO_SUFFIX = "__rs"; private static final String OLD_VERSION_SNAPSHOT_PREFIX = "old-version-snapshot-"; // Large snapshot pool settings to set up nodes for tests involving multiple repositories that need to have enough @@ -148,14 +149,19 @@ public void verifyNoLeakedListeners() throws Exception { @After public void assertRepoConsistency() { if (skipRepoConsistencyCheckReason == null) { - clusterAdmin().prepareGetRepositories().get().repositories().forEach(repositoryMetadata -> { - final String name = repositoryMetadata.name(); - if (repositoryMetadata.settings().getAsBoolean("readonly", false) == false) { - clusterAdmin().prepareDeleteSnapshot(name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get(); - clusterAdmin().prepareCleanupRepository(name).get(); - } - BlobStoreTestUtil.assertRepoConsistency(internalCluster(), name); - }); + clusterAdmin().prepareGetRepositories() + .get() + .repositories() + .stream() + .filter(repositoryMetadata -> !repositoryMetadata.name().endsWith(TEST_REMOTE_STORE_REPO_SUFFIX)) + .forEach(repositoryMetadata -> { + final String name = repositoryMetadata.name(); + if (repositoryMetadata.settings().getAsBoolean("readonly", false) == false) { + clusterAdmin().prepareDeleteSnapshot(name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get(); + clusterAdmin().prepareCleanupRepository(name).get(); + } + BlobStoreTestUtil.assertRepoConsistency(internalCluster(), name); + }); } else { logger.info("--> skipped repo consistency checks because [{}]", skipRepoConsistencyCheckReason); } @@ -367,10 +373,36 @@ protected void createRepository(String repoName, String type, Settings.Builder s assertAcked(clusterAdmin().preparePutRepository(repoName).setType(type).setSettings(settings)); } + protected void updateRepository(String repoName, String type, Settings.Builder settings) { + logger.info("--> updating repository [{}] [{}]", repoName, type); + assertAcked(clusterAdmin().preparePutRepository(repoName).setType(type).setSettings(settings)); + } + protected void createRepository(String repoName, String type, Path location) { createRepository(repoName, type, Settings.builder().put("location", location)); } + protected Settings.Builder getRepositorySettings(Path location, boolean shallowCopyEnabled) { + Settings.Builder 
settingsBuilder = randomRepositorySettings(); + settingsBuilder.put("location", location); + if (shallowCopyEnabled) { + settingsBuilder.put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true); + } + return settingsBuilder; + } + + protected Settings.Builder getRepositorySettings(Path location, String basePath, boolean shallowCopyEnabled) { + Settings.Builder settingsBuilder = randomRepositorySettings(); + settingsBuilder.put("location", location); + if (shallowCopyEnabled) { + settingsBuilder.put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true); + } + if (basePath != null) { + settingsBuilder.put("base_path", basePath); + } + return settingsBuilder; + } + protected void createRepository(String repoName, String type) { createRepository(repoName, type, randomRepositorySettings()); } diff --git a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java index fcaf9f6c900d3..7a7c4bd448c55 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java @@ -114,7 +114,7 @@ public long getFailureCount() { return failureCounter.get(); } - private final double randomControlIOExceptionRate; + private volatile double randomControlIOExceptionRate; private final double randomDataFileIOExceptionRate; @@ -246,6 +246,10 @@ public synchronized void unblock() { this.notifyAll(); } + public void setRandomControlIOExceptionRate(double randomControlIOExceptionRate) { + this.randomControlIOExceptionRate = randomControlIOExceptionRate; + } + public void blockOnDataFiles(boolean blocked) { blockOnDataFiles = blocked; } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 38617d09b703d..49d8b64bc71cd 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -2685,6 +2685,7 @@ public void ensureEstimatedStats() { false, false, false, + false, false ); assertThat( diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index caa5b90016740..fec45219ace81 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -109,6 +109,7 @@ import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; @@ -152,6 +153,7 @@ import org.opensearch.test.disruption.ServiceDisruptionScheme; import org.opensearch.test.store.MockFSIndexStore; import org.opensearch.test.transport.MockTransportService; +import org.opensearch.test.telemetry.MockTelemetryPlugin; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; @@ -776,6 +778,7 @@ protected Settings featureFlagSettings() { for (Setting builtInFlag : 
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java
index bf797ef6b310b..91c48f1679f9a 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java
@@ -48,6 +48,7 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.core.common.Strings;
@@ -66,6 +67,8 @@
 import org.opensearch.plugins.Plugin;
 import org.opensearch.script.MockScriptService;
 import org.opensearch.search.internal.SearchContext;
+import org.opensearch.telemetry.TelemetrySettings;
+import org.opensearch.test.telemetry.MockTelemetryPlugin;
 import org.opensearch.transport.TransportSettings;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -242,6 +245,8 @@ private Node newNode() {
             .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false)
             .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes
             .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName)
+            .put(FeatureFlags.TELEMETRY_SETTING.getKey(), true)
+            .put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true)
             .put(nodeSettings()) // allow test cases to provide their own settings or override these
             .build();
@@ -254,6 +259,7 @@ private Node newNode() {
             plugins.add(MockHttpTransport.TestPlugin.class);
         }
         plugins.add(MockScriptService.TestPlugin.class);
+        plugins.add(MockTelemetryPlugin.class);
         Node node = new MockNode(settings, plugins, forbidPrivateIndexSettings());
         try {
             node.start();
diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java
new file mode 100644
index 0000000000000..c02ab1d737303
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry;
+
+import org.opensearch.telemetry.Telemetry;
+import org.opensearch.telemetry.TelemetrySettings;
+import org.opensearch.telemetry.metrics.MetricsTelemetry;
+import org.opensearch.test.telemetry.tracing.MockTracingTelemetry;
+import org.opensearch.telemetry.tracing.TracingTelemetry;
+
+/**
+ * Mock {@link Telemetry} implementation for testing.
+ */
+public class MockTelemetry implements Telemetry {
+
+    private final TelemetrySettings settings;
+
+    /**
+     * Constructor with settings.
+     * @param settings telemetry settings.
+     */
+    public MockTelemetry(TelemetrySettings settings) {
+        this.settings = settings;
+    }
+
+    @Override
+    public TracingTelemetry getTracingTelemetry() {
+        return new MockTracingTelemetry();
+    }
+
+    @Override
+    public MetricsTelemetry getMetricsTelemetry() {
+        return new MetricsTelemetry() {
+        };
+    }
+}
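Because newNode() applies nodeSettings() after the telemetry entries, a single-node test that wants tracing off can simply override it. A hypothetical override; the setting key comes from the patch above:

    @Override
    protected Settings nodeSettings() {
        // The feature flag stays on; only the tracer is disabled for this test.
        return Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), false).build();
    }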
diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java
new file mode 100644
index 0000000000000..41cc5c1e77a34
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry;
+
+import java.util.Optional;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.plugins.TelemetryPlugin;
+import org.opensearch.telemetry.Telemetry;
+import org.opensearch.telemetry.TelemetrySettings;
+
+/**
+ * Mock {@link TelemetryPlugin} implementation for testing.
+ */
+public class MockTelemetryPlugin extends Plugin implements TelemetryPlugin {
+    private static final String MOCK_TRACER_NAME = "mock";
+
+    /**
+     * Base constructor.
+     */
+    public MockTelemetryPlugin() {
+
+    }
+
+    @Override
+    public Optional<Telemetry> getTelemetry(TelemetrySettings settings) {
+        return Optional.of(new MockTelemetry(settings));
+    }
+
+    @Override
+    public String getName() {
+        return MOCK_TRACER_NAME;
+    }
+}
diff --git a/test/telemetry/build.gradle b/test/telemetry/build.gradle
new file mode 100644
index 0000000000000..fbabe43aa5e5a
--- /dev/null
+++ b/test/telemetry/build.gradle
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+apply plugin: 'opensearch.build'
+apply plugin: 'opensearch.publish'
+
+dependencies {
+  api project(":libs:opensearch-common")
+  api project(":libs:opensearch-telemetry")
+}
+
+tasks.named('forbiddenApisMain').configure {
+  // package does not depend on core, so only jdk signatures should be checked
+  replaceSignatureFiles 'jdk-signatures'
+}
+
+test.enabled = false
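MockTelemetryPlugin is the piece the test base classes above register: node bootstrap asks each TelemetryPlugin for its Telemetry, and the mock answers with MockTelemetry. Resolved in isolation it looks roughly like this (a sketch; the mock constructor only stores the settings, so passing null is tolerated here):

    TelemetryPlugin plugin = new MockTelemetryPlugin();
    Telemetry telemetry = plugin.getTelemetry(null).get();      // a MockTelemetry
    TracingTelemetry tracing = telemetry.getTracingTelemetry(); // a fresh MockTracingTelemetry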
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
new file mode 100644
index 0000000000000..876145f6bf653
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
@@ -0,0 +1,163 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.Supplier;
+import org.opensearch.telemetry.tracing.AbstractSpan;
+import org.opensearch.telemetry.tracing.Span;
+
+/**
+ * MockSpan for testing and strict check validations. Not to be used for production cases.
+ */
+public class MockSpan extends AbstractSpan {
+    private final SpanProcessor spanProcessor;
+    private final Map<String, Object> metadata;
+    private final String traceId;
+    private final String spanId;
+    private boolean hasEnded;
+    private final Long startTime;
+    private Long endTime;
+
+    private final Object lock = new Object();
+
+    private static final Supplier<Random> randomSupplier = ThreadLocalRandom::current;
+
+    /**
+     * Base Constructor.
+     * @param spanName span name
+     * @param parentSpan parent span
+     * @param spanProcessor span processor
+     */
+    public MockSpan(String spanName, Span parentSpan, SpanProcessor spanProcessor) {
+        this(
+            spanName,
+            parentSpan,
+            parentSpan != null ? parentSpan.getTraceId() : IdGenerator.generateTraceId(),
+            IdGenerator.generateSpanId(),
+            spanProcessor
+        );
+    }
+
+    /**
+     * Constructor with traceId and SpanIds
+     * @param spanName Span Name
+     * @param parentSpan Parent Span
+     * @param traceId Trace ID
+     * @param spanId Span ID
+     * @param spanProcessor Span Processor
+     */
+    public MockSpan(String spanName, Span parentSpan, String traceId, String spanId, SpanProcessor spanProcessor) {
+        super(spanName, parentSpan);
+        this.spanProcessor = spanProcessor;
+        this.metadata = new HashMap<>();
+        this.traceId = traceId;
+        this.spanId = spanId;
+        this.startTime = System.nanoTime();
+    }
+
+    @Override
+    public void endSpan() {
+        synchronized (lock) {
+            if (hasEnded) {
+                return;
+            }
+            endTime = System.nanoTime();
+            hasEnded = true;
+        }
+        spanProcessor.onEnd(this);
+    }
+
+    @Override
+    public void addAttribute(String key, String value) {
+        putMetadata(key, value);
+    }
+
+    @Override
+    public void addAttribute(String key, Long value) {
+        putMetadata(key, value);
+    }
+
+    @Override
+    public void addAttribute(String key, Double value) {
+        putMetadata(key, value);
+    }
+
+    @Override
+    public void addAttribute(String key, Boolean value) {
+        putMetadata(key, value);
+    }
+
+    @Override
+    public void addEvent(String event) {
+        putMetadata(event, null);
+    }
+
+    private void putMetadata(String key, Object value) {
+        metadata.put(key, value);
+    }
+
+    @Override
+    public String getTraceId() {
+        return traceId;
+    }
+
+    @Override
+    public String getSpanId() {
+        return spanId;
+    }
+
+    /**
+     * Returns whether the span is ended or not.
+     * @return span end status.
+     */
+    public boolean hasEnded() {
+        synchronized (lock) {
+            return hasEnded;
+        }
+    }
+
+    /**
+     * Returns the start time of the span.
+     * @return start time of the span.
+     */
+    public Long getStartTime() {
+        return startTime;
+    }
+
+    /**
+     * Returns the end time of the span.
+     * @return end time of the span.
+     */
+    public Long getEndTime() {
+        return endTime;
+    }
+
+    public void setError(Exception exception) {
+        putMetadata("ERROR", exception.getMessage());
+    }
+
+    private static class IdGenerator {
+        private static String generateSpanId() {
+            long id = randomSupplier.get().nextLong();
+            return Long.toHexString(id);
+        }
+
+        private static String generateTraceId() {
+            long idHi = randomSupplier.get().nextLong();
+            long idLo = randomSupplier.get().nextLong();
+            long result = idLo | (idHi << 32);
+            return Long.toHexString(result);
+        }
+
+    }
+}
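MockSpan's contract is easiest to see in miniature: a child reuses its parent's trace id, endSpan() is idempotent, and start/end are reported to the SpanProcessor. A sketch against a no-op processor (test-only code, not part of the patch):

    SpanProcessor noop = new SpanProcessor() {
        @Override
        public void onStart(Span span) {}

        @Override
        public void onEnd(Span span) {}
    };
    MockSpan parent = new MockSpan("parent", null, noop); // generates a fresh trace id
    MockSpan child = new MockSpan("child", parent, noop); // inherits parent.getTraceId()
    child.endSpan();
    child.endSpan(); // second call returns early; hasEnded() stays true
    assert child.hasEnded() && child.getTraceId().equals(parent.getTraceId());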
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java
new file mode 100644
index 0000000000000..7e3f5a9031100
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java
@@ -0,0 +1,56 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing;
+
+import java.util.Locale;
+import java.util.Map;
+import java.util.function.BiConsumer;
+import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.TracingContextPropagator;
+
+/**
+ * Mock {@link TracingContextPropagator} to persist the span for internode communication.
+ */
+public class MockTracingContextPropagator implements TracingContextPropagator {
+
+    private static final String TRACE_PARENT = "traceparent";
+    private static final String SEPARATOR = "~";
+    private final SpanProcessor spanProcessor;
+
+    /**
+     * Constructor
+     * @param spanProcessor span processor.
+     */
+    public MockTracingContextPropagator(SpanProcessor spanProcessor) {
+        this.spanProcessor = spanProcessor;
+    }
+
+    @Override
+    public Span extract(Map<String, String> props) {
+        String value = props.get(TRACE_PARENT);
+        if (value != null) {
+            String[] values = value.split(SEPARATOR);
+            String traceId = values[0];
+            String spanId = values[1];
+            return new MockSpan(null, null, traceId, spanId, spanProcessor);
+        } else {
+            return null;
+        }
+    }
+
+    @Override
+    public void inject(Span currentSpan, BiConsumer<String, String> setter) {
+        if (currentSpan instanceof MockSpan) {
+            String traceId = currentSpan.getTraceId();
+            String spanId = currentSpan.getSpanId();
+            String traceParent = String.format(Locale.ROOT, "%s%s%s", traceId, SEPARATOR, spanId);
+            setter.accept(TRACE_PARENT, traceParent);
+        }
+    }
+}
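The propagator round-trips span identity as a single traceparent entry of the form traceId~spanId. A sketch, reusing the no-op processor from the previous example:

    MockTracingContextPropagator propagator = new MockTracingContextPropagator(noop);
    MockSpan span = new MockSpan("op", null, noop);
    Map<String, String> headers = new HashMap<>();
    propagator.inject(span, headers::put);       // headers = {traceparent=<traceId>~<spanId>}
    Span restored = propagator.extract(headers); // same trace id and span id; span name is null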
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
new file mode 100644
index 0000000000000..531b4ce36c36a
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
@@ -0,0 +1,46 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing;
+
+import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.TracingContextPropagator;
+import org.opensearch.telemetry.tracing.TracingTelemetry;
+
+/**
+ * Mock {@link TracingTelemetry} implementation for testing.
+ */
+public class MockTracingTelemetry implements TracingTelemetry {
+
+    private final SpanProcessor spanProcessor = new StrictCheckSpanProcessor();
+
+    /**
+     * Base constructor.
+     */
+    public MockTracingTelemetry() {
+
+    }
+
+    @Override
+    public Span createSpan(String spanName, Span parentSpan) {
+        Span span = new MockSpan(spanName, parentSpan, spanProcessor);
+        spanProcessor.onStart(span);
+        return span;
+    }
+
+    @Override
+    public TracingContextPropagator getContextPropagator() {
+        return new MockTracingContextPropagator(spanProcessor);
+    }
+
+    @Override
+    public void close() {
+        ((StrictCheckSpanProcessor) spanProcessor).ensureAllSpansAreClosed();
+        ((StrictCheckSpanProcessor) spanProcessor).clear();
+    }
+}
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/SpanProcessor.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/SpanProcessor.java
new file mode 100644
index 0000000000000..cb9d0dbd428e4
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/SpanProcessor.java
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing;
+
+import org.opensearch.telemetry.tracing.Span;
+
+/**
+ * Processes the span and can perform any action on the span start and end.
+ */
+public interface SpanProcessor {
+    /**
+     * Logic to be executed on span start.
+     * @param span span which is starting.
+     */
+    void onStart(Span span);
+
+    /**
+     * Logic to be executed on span end.
+     * @param span span which is ending.
+     */
+    void onEnd(Span span);
+}
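Since MockTracingTelemetry routes every created span through the strict processor, close() doubles as a leak check: a span that is started but never ended fails the test with the stack trace of its creation site. A sketch:

    MockTracingTelemetry telemetry = new MockTracingTelemetry();
    Span leaked = telemetry.createSpan("leaked", null);
    // leaked.endSpan() intentionally omitted
    telemetry.close(); // throws AssertionError: spans are not ended properly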
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
new file mode 100644
index 0000000000000..34d4d96809755
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
@@ -0,0 +1,84 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing;
+
+import java.util.Arrays;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import org.opensearch.telemetry.tracing.Span;
+
+/**
+ * Strict check span processor to validate the spans.
+ */
+public class StrictCheckSpanProcessor implements SpanProcessor {
+    private final Map<String, StackTraceElement[]> spanMap = new ConcurrentHashMap<>();
+
+    /**
+     * Base constructor.
+     */
+    public StrictCheckSpanProcessor() {
+
+    }
+
+    @Override
+    public void onStart(Span span) {
+        spanMap.put(span.getSpanId(), Thread.currentThread().getStackTrace());
+    }
+
+    @Override
+    public void onEnd(Span span) {
+        spanMap.remove(span.getSpanId());
+    }
+
+    /**
+     * Ensures that all the spans are closed. Throws an AssertionError carrying the stack trace of the method from
+     * which the span was created. This can later be enhanced to report all failed spans in a single go, based on
+     * usability.
+     */
+    public void ensureAllSpansAreClosed() {
+        if (!spanMap.isEmpty()) {
+            for (Map.Entry<String, StackTraceElement[]> entry : spanMap.entrySet()) {
+                StackTraceElement[] filteredStackTrace = getFilteredStackTrace(entry.getValue());
+                AssertionError error = new AssertionError(
+                    String.format(
+                        Locale.ROOT,
+                        "Total [%d] spans are not ended properly. Find below the stack trace for one of the un-ended spans",
+                        spanMap.size()
+                    )
+                );
+                error.setStackTrace(filteredStackTrace);
+                spanMap.clear();
+                throw error;
+            }
+        }
+    }
+
+    /**
+     * Clears the state.
+     */
+    public void clear() {
+        spanMap.clear();
+    }
+
+    private StackTraceElement[] getFilteredStackTrace(StackTraceElement[] stackTraceElements) {
+        int filteredElementsCount = 0;
+        while (filteredElementsCount < stackTraceElements.length) {
+            String className = stackTraceElements[filteredElementsCount].getClassName();
+            if (className.startsWith("java.lang.Thread")
+                || className.startsWith("org.opensearch.telemetry")
+                || className.startsWith("org.opensearch.tracing")) {
+                filteredElementsCount++;
+            } else {
+                break;
+            }
+        }
+        return Arrays.copyOfRange(stackTraceElements, filteredElementsCount, stackTraceElements.length);
+    }
+}
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/package-info.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/package-info.java
new file mode 100644
index 0000000000000..83e2b4035acbf
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Mock tracing implementations for the telemetry test framework. */
+package org.opensearch.test.telemetry.tracing;