diff --git a/.backportrc.json b/.backportrc.json index cb8aa183f7bf9..59843f4d5f134 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,9 +1,9 @@ { "upstream" : "elastic/elasticsearch", - "targetBranchChoices" : [ "main", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], + "targetBranchChoices" : [ "main", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], "targetPRLabels" : [ "backport" ], "branchLabelMapping" : { - "^v8.14.0$" : "main", + "^v8.15.0$" : "main", "^v(\\d+).(\\d+).\\d+(?:-(?:alpha|beta|rc)\\d+)?$" : "$1.$2" } } \ No newline at end of file diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 18a93c9b63a3e..8103b40cbaff0 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -48,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.21", "8.13.3", "8.14.0"] + BWC_VERSION: ["7.17.21", "8.13.3", "8.14.0", "8.15.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/lucene-snapshot/run-tests.yml b/.buildkite/pipelines/lucene-snapshot/run-tests.yml index 15d78f8495ca8..a5d3c4e5f7935 100644 --- a/.buildkite/pipelines/lucene-snapshot/run-tests.yml +++ b/.buildkite/pipelines/lucene-snapshot/run-tests.yml @@ -32,6 +32,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + - label: part4 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index c306e1d9f63cb..347b7ddde752e 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -561,6 +561,22 @@ steps: env: BWC_VERSION: 8.14.0 + - label: "{{matrix.image}} / 8.15.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.15.0 + - group: packaging-tests-windows steps: - label: "{{matrix.image}} / packaging-tests-windows" diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index 0240fd03f4a89..a3922d8226924 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -47,6 +47,7 @@ steps: - checkPart1 - checkPart2 - checkPart3 + - checkPart4 - checkRestCompat agents: provider: gcp @@ -70,6 +71,7 @@ steps: - checkPart1 - checkPart2 - checkPart3 + - checkPart4 - checkRestCompat agents: provider: aws diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index 05d516992a7f6..7315dc9de260f 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -49,6 +49,7 @@ steps: - checkPart1 
- checkPart2 - checkPart3 + - checkPart4 - checkRestCompat agents: provider: gcp @@ -89,6 +90,7 @@ steps: - checkPart1 - checkPart2 - checkPart3 + - checkPart4 - checkRestCompat agents: provider: gcp diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 3410436eda2bf..64ea30266bf33 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -332,6 +332,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.14.0 + - label: 8.15.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.15.0 - label: concurrent-search-tests command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.jvm.argline=-Des.concurrent_search=true -Des.concurrent_search=true functionalTests timeout_in_minutes: 420 @@ -380,6 +390,7 @@ steps: - checkPart1 - checkPart2 - checkPart3 + - checkPart4 - checkRestCompat agents: provider: gcp @@ -396,7 +407,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.21", "8.13.3", "8.14.0"] + BWC_VERSION: ["7.17.21", "8.13.3", "8.14.0", "8.15.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -420,6 +431,7 @@ steps: - checkPart1 - checkPart2 - checkPart3 + - checkPart4 - checkRestCompat agents: provider: gcp @@ -438,7 +450,7 @@ steps: - graalvm-ce17 - openjdk17 - openjdk21 - BWC_VERSION: ["7.17.21", "8.13.3", "8.14.0"] + BWC_VERSION: ["7.17.21", "8.13.3", "8.14.0", "8.15.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 46165da472e74..32a5ef8f8d1e5 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -32,3 +32,4 @@ BWC_VERSION: - "8.12.2" - "8.13.3" - "8.14.0" + - "8.15.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index dfd238a041b1e..6ee9691a9e5ee 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -2,3 +2,4 @@ BWC_VERSION: - "7.17.21" - "8.13.3" - "8.14.0" + - "8.15.0" diff --git a/branches.json b/branches.json index 772693505b9e0..daf6d249f7268 100644 --- a/branches.json +++ b/branches.json @@ -4,6 +4,9 @@ { "branch": "main" }, + { + "branch": "8.14" + }, { "branch": "8.13" }, diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 0883097e75aad..d3d528cbff494 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,4 +1,4 @@ -elasticsearch = 8.14.0 +elasticsearch = 8.15.0 lucene = 9.10.0 bundled_jdk_vendor = openjdk diff --git a/docs/changelog/105682.yaml b/docs/changelog/105682.yaml deleted file mode 100644 index f1713357ace80..0000000000000 --- a/docs/changelog/105682.yaml +++ /dev/null @@ -1,20 +0,0 @@ -pr: 105682 -summary: Introduce global retention in data stream lifecycle. -area: Data streams -type: feature -issues: - - 106169 -highlight: - title: Add global retention in data stream lifecycle - body: |- - Data stream lifecycle now supports configuring retention on a cluster level, namely global retention. Global retention - allows us to configure two different retentions: - - - `default_retention` is applied to all data streams managed by the data stream lifecycle that do not have retention - defined on the data stream level. 
- - `max_retention` is applied to all data streams managed by the data stream lifecycle and it allows any data stream - data to be deleted after the `max_retention` has passed. - - Furthermore, we introduce the term `effective_retention` which is the retention applied at a certain moment to a data - stream considering all the available retention configurations. - notable: true \ No newline at end of file diff --git a/docs/changelog/107383.yaml b/docs/changelog/107383.yaml new file mode 100644 index 0000000000000..07886ac96180c --- /dev/null +++ b/docs/changelog/107383.yaml @@ -0,0 +1,6 @@ +pr: 107383 +summary: Users with monitor privileges can access async_search/status endpoint + even when setting keep_alive +area: Authorization +type: bug +issues: [] diff --git a/docs/changelog/107494.yaml b/docs/changelog/107494.yaml new file mode 100644 index 0000000000000..1d71ce284a4a8 --- /dev/null +++ b/docs/changelog/107494.yaml @@ -0,0 +1,6 @@ +pr: 107494 +summary: Handle infinity during synthetic source construction for scaled float field +area: Mapping +type: bug +issues: + - 107101 diff --git a/docs/changelog/107551.yaml b/docs/changelog/107551.yaml new file mode 100644 index 0000000000000..78e64cc526638 --- /dev/null +++ b/docs/changelog/107551.yaml @@ -0,0 +1,5 @@ +pr: 107551 +summary: Avoid attempting to load the same empty field twice in fetch phase +area: Search +type: bug +issues: [] diff --git a/docs/reference/data-streams/data-stream-apis.asciidoc b/docs/reference/data-streams/data-stream-apis.asciidoc index d525f0d8a7885..c13703ab2a6ee 100644 --- a/docs/reference/data-streams/data-stream-apis.asciidoc +++ b/docs/reference/data-streams/data-stream-apis.asciidoc @@ -27,12 +27,6 @@ preview:[] preview:[] * <> preview:[] -* <> -preview:[] -* <> -preview:[] -* <> -preview:[] The following API is available for <>: @@ -65,10 +59,4 @@ include::{es-ref-dir}/data-streams/lifecycle/apis/explain-lifecycle.asciidoc[] include::{es-ref-dir}/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc[] -include::{es-ref-dir}/data-streams/lifecycle/apis/put-global-retention.asciidoc[] - -include::{es-ref-dir}/data-streams/lifecycle/apis/get-global-retention.asciidoc[] - -include::{es-ref-dir}/data-streams/lifecycle/apis/delete-global-retention.asciidoc[] - include::{es-ref-dir}/indices/downsample-data-stream.asciidoc[] diff --git a/docs/reference/data-streams/lifecycle/apis/delete-global-retention.asciidoc b/docs/reference/data-streams/lifecycle/apis/delete-global-retention.asciidoc deleted file mode 100644 index 5b211eaf09e13..0000000000000 --- a/docs/reference/data-streams/lifecycle/apis/delete-global-retention.asciidoc +++ /dev/null @@ -1,121 +0,0 @@ -[[data-streams-delete-global-retention]] -=== Delete the global retention of data streams -++++ -Delete Data Stream Global Retention -++++ - -preview::[] - -Deletes the global retention configuration that applies on every data stream managed by <>. - -[[delete-global-retention-api-prereqs]] -==== {api-prereq-title} - -** If the {es} {security-features} are enabled, you must have the `manage_data_stream_global_retention` <> to use this API. - -[[data-streams-delete-global-retention-request]] -==== {api-request-title} - -`DELETE _data_stream/_global_retention` - -[[data-streams-delete-global-retention-desc]] -==== {api-description-title} - -Deletes the global retention configuration that is applied on data streams managed by data stream lifecycle. 
- -[role="child_attributes"] -[[delete-global-retention-api-query-parms]] -==== {api-query-parms-title} - -`dry_run`:: -(Boolean) Signals that the request should determine the effect of the removal of the existing without updating -the global retention. The default value is `false`, which means the removal will happen. - -[[delete-global-retention-api-response-body]] -==== {api-response-body-title} - -`acknowledged`:: -(boolean) -True, if the global retention has been removed. False, if it fails or if it was a dry run. - -`dry_run`:: -(boolean) -True, if this was a dry run, false otherwise. - -`affected_data_streams`:: -(array of objects) -Contains information about the data streams affected by the change. -+ -.Properties of objects in `affected_data_streams` -[%collapsible%open] -==== -`name`:: -(string) -Name of the data stream. -`previous_effective_retention`:: -(string) -The retention that was effective before the change of this request. `infinite` if there was no retention applicable. -`new_effective_retention`:: -(string) -The retention that is or would be effective after this request. `infinite` if there is no retention applicable. -==== - -[[data-streams-delete-global-retention-example]] -==== {api-examples-title} - -//// - -[source,console] --------------------------------------------------- -PUT _data_stream/_global_retention -{ - "default_retention": "7d", - "max_retention": "90d" -} - -PUT /_index_template/template -{ - "index_patterns": ["my-data-stream*"], - "template": { - "lifecycle": {} - }, - "data_stream": { } -} - -PUT /_data_stream/my-data-stream ----- -// TESTSETUP -//// - -//// -[source,console] ----- -DELETE /_data_stream/my-data-stream* -DELETE /_index_template/template -DELETE /_data_stream/_global_retention ----- -// TEARDOWN -//// - -Let's update the global retention: -[source,console] --------------------------------------------------- -DELETE _data_stream/_global_retention --------------------------------------------------- - -The response will look like the following: - -[source,console-result] --------------------------------------------------- -{ - "acknowledged": true, - "dry_run": false, - "affected_data_streams": [ - { - "name": "my-data-stream", - "previous_effective_retention": "7d", - "new_effective_retention": "infinite" - } - ] -} --------------------------------------------------- diff --git a/docs/reference/data-streams/lifecycle/apis/get-global-retention.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-global-retention.asciidoc deleted file mode 100644 index 0997c2d84ece2..0000000000000 --- a/docs/reference/data-streams/lifecycle/apis/get-global-retention.asciidoc +++ /dev/null @@ -1,90 +0,0 @@ -[[data-streams-get-global-retention]] -=== Get the global retention of data streams -++++ -Get Data Stream Global Retention -++++ - -preview::[] - -Gets the global retention that applies on every data stream managed by <>. - -[[get-global-retention-api-prereqs]] -==== {api-prereq-title} - -** If the {es} {security-features} are enabled, you must have the `monitor_data_stream_global_retention` or -`manage_data_stream_global_retention` <> to use this API. - -[[data-streams-get-global-retention-request]] -==== {api-request-title} - -`GET _data_stream/_global_retention` - -[[data-streams-get-global-retention-desc]] -==== {api-description-title} - -Gets the global retention configuration that is applied on data streams managed by data stream lifecycle. 
- -[role="child_attributes"] -[[get-global-retention-api-query-parms]] -==== {api-query-parms-title} - -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] - -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] - -[[get-global-retention-api-response-body]] -==== {api-response-body-title} - -`default_retention`:: -(Optional, string) -The default retention that will apply to any data stream managed by data stream lifecycle that does not have a retention -defined on the data stream level. - -`max_retention`:: -(Optional, string) -The max retention that will apply to all data streams managed by data stream lifecycle. The max retention will override the -retention of a data stream whose retention exceeds the max retention. - - -[[data-streams-get-global-retention-example]] -==== {api-examples-title} - -//// - -[source,console] --------------------------------------------------- -PUT _data_stream/_global_retention -{ - "default_retention": "7d", - "max_retention": "90d" -} --------------------------------------------------- -// TESTSETUP - -[source,console] --------------------------------------------------- -DELETE _data_stream/_global_retention --------------------------------------------------- -// TEARDOWN - -//// - -Let's retrieve the global retention: - -[source,console] --------------------------------------------------- -GET _data_stream/_global_retention --------------------------------------------------- - -The response will look like the following: - -[source,console-result] --------------------------------------------------- -{ - "default_retention": "7d", <1> - "max_retention": "90d" <2> -} --------------------------------------------------- -<1> 7 days retention will be applied to any data stream that does not have retention set in its lifecycle. -<2> 90 days retention will be applied to all data streams that have retention that exceeds the 90 days, this -applies to data streams that have infinite retention too. \ No newline at end of file diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc index 83955417abd0b..0d80a31bd4f5a 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc @@ -130,18 +130,14 @@ The response will look like the following: "name": "my-data-stream-1", "lifecycle": { "enabled": true, - "data_retention": "7d", - "effective_retention": "7d", - "retention_determined_by": "data_stream_configuration" + "data_retention": "7d" } }, { "name": "my-data-stream-2", "lifecycle": { "enabled": true, - "data_retention": "7d", - "effective_retention": "7d", - "retention_determined_by": "data_stream_configuration" + "data_retention": "7d" } } ] diff --git a/docs/reference/data-streams/lifecycle/apis/put-global-retention.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-global-retention.asciidoc deleted file mode 100644 index c9bc804c13408..0000000000000 --- a/docs/reference/data-streams/lifecycle/apis/put-global-retention.asciidoc +++ /dev/null @@ -1,131 +0,0 @@ -[[data-streams-put-global-retention]] -=== Update the global retention of data streams -++++ -Update Data Stream Global Retention -++++ - -preview::[] - -Updates the global retention configuration that applies on every data stream managed by <>. 
- -[[put-global-retention-api-prereqs]] -==== {api-prereq-title} - -** If the {es} {security-features} are enabled, you must have the `manage_data_stream_global_retention` <> to use this API. - -[[data-streams-put-global-retention-request]] -==== {api-request-title} - -`PUT _data_stream/_global_retention` - -[[data-streams-put-global-retention-desc]] -==== {api-description-title} - -Updates the global retention configuration that is applied on data streams managed by data stream lifecycle. - -[role="child_attributes"] -[[put-global-retention-api-query-parms]] -==== {api-query-parms-title} - -`dry_run`:: -(Boolean) Signals that the request should determine the effect of the provided configuration without updating the -global retention settings. The default value is `false`, which means the configuration provided will be applied. - -[[put-global-retention-api-request-body]] -==== {api-request-body-title} - -`default_retention`:: -(Optional, string) -The default retention that will apply to any data stream managed by data stream lifecycle that does not have a retention -defined on the data stream level. - -`max_retention`:: -(Optional, string) -The max retention that will apply to all data streams managed by data stream lifecycle. The max retention will override the -retention of a data stream which retention exceeds the max retention. - -[[put-global-retention-api-response-body]] -==== {api-response-body-title} - -`acknowledged`:: -(boolean) -True, if the global retention has been updated to the provided values. False, if it fails or if it was a dry run. - -`dry_run`:: -(boolean) -True, if this was a dry run, false otherwise. - -`affected_data_streams`:: -(array of objects) -Contains information about the data streams affected by the change. -+ -.Properties of objects in `affected_data_streams` -[%collapsible%open] -==== -`name`:: -(string) -Name of the data stream. -`previous_effective_retention`:: -(string) -The retention that was effective before the change of this request. `infinite` if there was no retention applicable. -`new_effective_retention`:: -(string) -The retention that is or would be effective after this request. `infinite` if there is no retention applicable. 
-==== - -[[data-streams-put-global-retention-example]] -==== {api-examples-title} - -//// -[source,console] ----- -PUT /_index_template/template -{ - "index_patterns": ["my-data-stream*"], - "template": { - "lifecycle": {} - }, - "data_stream": { } -} - -PUT /_data_stream/my-data-stream ----- -// TESTSETUP -//// - -//// -[source,console] ----- -DELETE /_data_stream/my-data-stream* -DELETE /_index_template/template -DELETE /_data_stream/_global_retention ----- -// TEARDOWN -//// - -Let's update the global retention: -[source,console] --------------------------------------------------- -PUT _data_stream/_global_retention -{ - "default_retention": "7d", - "max_retention": "90d" -} --------------------------------------------------- - -The response will look like the following: - -[source,console-result] --------------------------------------------------- -{ - "acknowledged": true, - "dry_run": false, - "affected_data_streams": [ - { - "name": "my-data-stream", - "previous_effective_retention": "infinite", - "new_effective_retention": "7d" - } - ] -} --------------------------------------------------- diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc index dff3dae22f8ef..bf861df7c80d4 100644 --- a/docs/reference/data-streams/lifecycle/index.asciidoc +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -16,8 +16,7 @@ To achieve that, it supports: * Automatic <>, which chunks your incoming data in smaller pieces to facilitate better performance and backwards incompatible mapping changes. * Configurable retention, which allows you to configure the time period for which your data is guaranteed to be stored. -{es} is allowed at a later time to delete data older than this time period. Retention can be configured on the data stream level -or on a global level. Read more about the different options in this <>. +{es} is allowed at a later time to delete data older than this time period. A data stream lifecycle also supports downsampling the data stream backing indices. See <> for @@ -43,10 +42,9 @@ data that is most likely to keep being queried. 4. If <> is configured it will execute all the configured downsampling rounds. 5. Applies retention to the remaining backing indices. This means deleting the backing indices whose -`generation_time` is longer than the effective retention period (read more about the -<>). The `generation_time` is only applicable to rolled -over backing indices and it is either the time since the backing index got rolled over, or the time optionally configured -in the <> setting. +`generation_time` is longer than the configured retention period. The `generation_time` is only applicable to rolled over backing +indices and it is either the time since the backing index got rolled over, or the time optionally configured in the +<> setting. IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing index have passed the retention period. 
As a result, the retention period is not the exact time data gets deleted, but @@ -79,6 +77,4 @@ include::tutorial-manage-new-data-stream.asciidoc[] include::tutorial-manage-existing-data-stream.asciidoc[] -include::tutorial-manage-data-stream-retention.asciidoc[] - include::tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc[] diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc deleted file mode 100644 index 7b84cd238ce49..0000000000000 --- a/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc +++ /dev/null @@ -1,183 +0,0 @@ -[role="xpack"] -[[tutorial-manage-data-stream-retention]] -=== Tutorial: Data stream retention - -preview::[] - -In this tutorial, we are going to go over the data stream lifecycle retention, define it, go over how it can be configured and how -it can be applied. Keep in mind, the following options apply only to data streams that are managed by the data stream lifecycle. - -. <> -. <> -. <> -. <> - -You can verify if a data steam is managed by the data stream lifecycle via the <>: - -//// -[source,console] ----- -PUT /_index_template/template -{ - "index_patterns": ["my-data-stream*"], - "template": { - "lifecycle": {} - }, - "data_stream": { } -} - -PUT /_data_stream/my-data-stream ----- -// TESTSETUP -//// - -//// -[source,console] ----- -DELETE /_data_stream/my-data-stream* -DELETE /_index_template/template -DELETE /_data_stream/_global_retention ----- -// TEARDOWN -//// - -[source,console] --------------------------------------------------- -GET _data_stream/my-data-stream/_lifecycle --------------------------------------------------- - -The result should look like this: - -[source,console-result] --------------------------------------------------- -{ - "data_streams": [ - { - "name": "my-data-stream", <1> - "lifecycle": { - "enabled": true <2> - } - } - ] -} --------------------------------------------------- -// TESTRESPONSE[skip:the result is for illustrating purposes only] -<1> The name of your data stream. -<2> Ensure that the lifecycle is enabled, meaning this should be `true`. - -[discrete] -[[what-is-retention]] -==== What is data stream retention? - -We define retention as the least amount of time the data of a data stream are going to be kept in {es}. After this time period -has passed, {es} is allowed to remove these data to free up space and/or manage costs. - -NOTE: Retention does not define the period that the data will be removed, but the minimum time period they will be kept. - -We define 4 different types of retention: - -* The data stream retention, or `data_retention`, which is the retention configured on the data stream level. It can be -set via an <> for future data streams or via the <> for an existing data stream. When the data stream retention is not set, it implies that the data -need to be kept forever. -* The global default retention, or `default_retention`, which is a retention configured on a cluster level and will be -applied to all data streams managed by data stream lifecycle that do not have `data_retention` configured. Effectively, -it ensures that there will be no data streams keeping their data forever. This can be set via the -<>. -* The global max retention, or `max_retention`, which is a retention configured on a cluster level and will be applied to -all data streams managed by data stream lifecycle. 
Effectively, it ensures that there will be no data streams whose retention -will exceed this time period. This can be set via the <>. -* The effective retention, or `effective_retention`, which is the retention applied at a data stream on a given moment. -Effective retention cannot be set, it is derived by taking into account all the configured retention listed above and is -calculated as it is described <>. - -[discrete] -[[retention-configuration]] -==== How to configure retention? - -- By setting the `data_retention` on the data stream level. This retention can be configured in two ways: -+ --- For new data streams, it can be defined in the index template that would be applied during the data stream's creation. -You can use the <>, for example: -+ -[source,console] --------------------------------------------------- -PUT _index_template/template -{ - "index_patterns": ["my-data-stream*"], - "data_stream": { }, - "priority": 500, - "template": { - "lifecycle": { - "data_retention": "7d" - } - }, - "_meta": { - "description": "Template with data stream lifecycle" - } -} --------------------------------------------------- --- For an existing data stream, it can be set via the <>. -+ -[source,console] ----- -PUT _data_stream/my-data-stream/_lifecycle -{ - "data_retention": "30d" <1> -} ----- -// TEST[continued] -<1> The retention period of this data stream is set to 30 days. - -- By setting the global retention via the `default_retention` and `max_retention` that are set on a cluster level. You -can set them via the <>. For example: -+ -[source,console] --------------------------------------------------- -PUT _data_stream/_global_retention -{ - "default_retention": "7d", - "max_retention": "90d" -} --------------------------------------------------- -// TEST[continued] - -[discrete] -[[effective-retention-calculation]] -==== How is the effective retention calculated? -The effective is calculated in the following way: - -- The `effective_retention` is the `default_retention`, when `default_retention` is defined and the data stream does not -have `data_retention`. -- The `effective_retention` is the `data_retention`, when `data_retention` is defined and if `max_retention` is defined, -it is less than the `max_retention`. -- The `effective_retention` is the `max_retention`, when `max_retention` is defined, and the data stream has either no -`data_retention` or its `data_retention` is greater than the `max_retention`. - -The above is demonstrated in the examples below: - -|=== -|`default_retention` |`max_retention` |`data_retention` |`effective_retention` |Retention determined by - -|Not set |Not set |Not set |Infinite |N/A -|Not relevant |12 months |**30 days** |30 days |`data_retention` -|Not relevant |Not set |**30 days** |30 days |`data_retention` -|**30 days** |12 months |Not set |30 days |`default_retention` -|**30 days** |30 days |Not set |30 days |`default_retention` -|Not relevant |**30 days** |12 months |30 days |`max_retention` -|Not set |**30 days** |Not set |30 days |`max_retention` -|=== - -[discrete] -[[effective-retention-application]] -==== How is the effective retention applied? - -Retention is applied to the remaining backing indices of a data stream as the last step of -<>. Data stream lifecycle will retrieve the backing indices -whose `generation_time` is longer than the effective retention period and delete them. 
The `generation_time` is only -applicable to rolled over backing indices and it is either the time since the backing index got rolled over, or the time -optionally configured in the <> setting. - -IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing -index have passed the retention period. As a result, the retention period is not the exact time data get deleted, but -the minimum time data will be stored. \ No newline at end of file diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc index 7be2b30b9b83c..5670faaade3ce 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc @@ -74,9 +74,7 @@ The response will look like: "generation_time": "6.84s", <9> "lifecycle": { "enabled": true, - "data_retention": "30d", - "effective_retention": "30d" <10> - "retention_determined_by": "data_stream_configuration" + "data_retention": "30d" <10> } } } diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc index ecfdc16884082..6f1d81ab6ead2 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc @@ -93,12 +93,10 @@ The result will look like this: { "data_streams": [ { - "name": "my-data-stream", <1> + "name": "my-data-stream",<1> "lifecycle": { - "enabled": true, <2> - "data_retention": "7d", <3> - "effective_retention": "7d", <4> - "retention_determined_by": "data_stream_configuration" <5> + "enabled": true, <2> + "data_retention": "7d" <3> } } ] @@ -106,11 +104,8 @@ The result will look like this: -------------------------------------------------- <1> The name of your data stream. <2> Shows if the data stream lifecycle is enabled for this data stream. -<3> The desired retention period of the data indexed in this data stream, this means that if there are no other limitations -the data for this data stream will be preserved for at least 7 days. -<4> The effective retention, this means that the data in this data stream will +<3> The retention period of the data indexed in this data stream, this means that the data in this data stream will be kept at least for 7 days. After that {es} can delete it at its own discretion. -<5> The configuration that determined the effective retention. 
If you want to see more information about how the data stream lifecycle is applied on individual backing indices use the <>: @@ -133,9 +128,7 @@ The result will look like this: "time_since_index_creation": "1.6m", <3> "lifecycle": { <4> "enabled": true, - "data_retention": "7d", - "effective_retention": "7d", - "retention_determined_by": "data_stream_configuration" + "data_retention": "7d" } } } diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index 65eaf472890f4..3125c82120d8d 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -200,10 +200,10 @@ PUT _index_template/dsl-data-stream-template "template": { "settings": { "index.lifecycle.name": "pre-dsl-ilm-policy", - "index.lifecycle.prefer_ilm": false <1> + "index.lifecycle.prefer_ilm": false <1> }, - "lifecycle": { <2> - "data_retention": "7d" <3> + "lifecycle": { + "data_retention": "7d" <2> } } } @@ -215,8 +215,6 @@ PUT _index_template/dsl-data-stream-template precedence over data stream lifecycle. <2> We're configuring the data stream lifecycle so _new_ data streams will be managed by data stream lifecycle. -<3> The desired retention, meaning that this data stream should keep the data for at least 7 days, -if this retention is possible. We've now made sure that new data streams will be managed by data stream lifecycle. @@ -270,9 +268,7 @@ GET _data_stream/dsl-data-stream "template": "dsl-data-stream-template", "lifecycle": { "enabled": true, - "data_retention": "7d", - "effective_retention": "7d", - "retention_determined_by": "data_stream_configuration" + "data_retention": "7d" }, "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", <3> @@ -350,9 +346,7 @@ GET _data_stream/dsl-data-stream "template": "dsl-data-stream-template", "lifecycle": { "enabled": true, - "data_retention": "7d", - "effective_retention": "7d", - "retention_determined_by": "data_stream_configuration" + "data_retention": "7d" }, "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index c524380547839..51a2898b5d598 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -1,5 +1,6 @@ include::migration_intro.asciidoc[] +* <> * <> * <> * <> @@ -16,6 +17,7 @@ include::migration_intro.asciidoc[] * <> * <> +include::migrate_8_15.asciidoc[] include::migrate_8_14.asciidoc[] include::migrate_8_13.asciidoc[] include::migrate_8_12.asciidoc[] diff --git a/docs/reference/migration/migrate_8_15.asciidoc b/docs/reference/migration/migrate_8_15.asciidoc new file mode 100644 index 0000000000000..a183e68a50693 --- /dev/null +++ b/docs/reference/migration/migrate_8_15.asciidoc @@ -0,0 +1,20 @@ +[[migrating-8.15]] +== Migrating to 8.15 +++++ +8.15 +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to {es} 8.15. + +See also <> and <>. + +coming::[8.15.0] + + +[discrete] +[[breaking-changes-8.15]] +=== Breaking changes + +There are no breaking changes in {es} 8.15. 
+ diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index 27fca2bb56375..5b3f98b5e1ea8 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -232,12 +232,12 @@ GET my-index/_search "text_expansion":{ "ml.tokens":{ "model_id":".elser_model_2", - "model_text":"How is the weather in Jamaica?" - }, - "pruning_config": { - "tokens_freq_ratio_threshold": 5, - "tokens_weight_threshold": 0.4, - "only_score_pruned_tokens": false + "model_text":"How is the weather in Jamaica?", + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": false + } } } }, @@ -248,12 +248,12 @@ GET my-index/_search "text_expansion": { "ml.tokens": { "model_id": ".elser_model_2", - "model_text": "How is the weather in Jamaica?" - }, - "pruning_config": { - "tokens_freq_ratio_threshold": 5, - "tokens_weight_threshold": 0.4, - "only_score_pruned_tokens": true + "model_text": "How is the weather in Jamaica?", + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": true + } } } } diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 05c97d51a38e7..3cef5cc88bbb7 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,6 +6,7 @@ This section summarizes the changes in each release. +* <> * <> * <> * <> @@ -64,6 +65,7 @@ This section summarizes the changes in each release. -- +include::release-notes/8.15.0.asciidoc[] include::release-notes/8.14.0.asciidoc[] include::release-notes/8.13.2.asciidoc[] include::release-notes/8.13.1.asciidoc[] diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc new file mode 100644 index 0000000000000..97f4a51a1142f --- /dev/null +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -0,0 +1,8 @@ +[[release-notes-8.15.0]] +== {es} version 8.15.0 + +coming[8.15.0] + +Also see <>. + + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 8d9d743a239f5..8c1590d17288f 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -11,7 +11,8 @@ For detailed information about this release, see the <> and // Add previous release to the list Other versions: -{ref-bare}/8.13/release-highlights.html[8.13] +{ref-bare}/8.14/release-highlights.html[8.14] +| {ref-bare}/8.13/release-highlights.html[8.13] | {ref-bare}/8.12/release-highlights.html[8.12] | {ref-bare}/8.11/release-highlights.html[8.11] | {ref-bare}/8.10/release-highlights.html[8.10] @@ -28,24 +29,13 @@ Other versions: endif::[] +// The notable-highlights tag marks entries that +// should be featured in the Stack Installation and Upgrade Guide: // tag::notable-highlights[] - -[discrete] -[[add_global_retention_in_data_stream_lifecycle]] -=== Add global retention in data stream lifecycle -Data stream lifecycle now supports configuring retention on a cluster level, namely global retention. Global retention -allows us to configure two different retentions: - -- `default_retention` is applied to all data streams managed by the data stream lifecycle that do not have retention -defined on the data stream level. 
-- `max_retention` is applied to all data streams managed by the data stream lifecycle and it allows any data stream -data to be deleted after the `max_retention` has passed. - -Furthermore, we introduce the term `effective_retention` which is the retention applied at a certain moment to a data -stream considering all the available retention configurations. - -{es-pull}105682[#105682] - +// [discrete] +// === Heading +// +// Description. // end::notable-highlights[] diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 3fed14231808c..48c65ed0abc7b 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -194,7 +194,7 @@ The API returns the following result: "load_source_count": 5 }, "debug": { - "stored_fields": ["_id", "_routing", "_source"] + "stored_fields": ["_id", "_ignored", "_routing", "_source"] }, "children": [ { @@ -1051,7 +1051,7 @@ And here is the fetch profile: "load_source_count": 5 }, "debug": { - "stored_fields": ["_id", "_routing", "_source"] + "stored_fields": ["_id", "_ignored", "_routing", "_source"] }, "children": [ { diff --git a/docs/reference/troubleshooting/snapshot/add-repository.asciidoc b/docs/reference/troubleshooting/snapshot/add-repository.asciidoc index 386c2561c03c6..e70bd244df3a5 100644 --- a/docs/reference/troubleshooting/snapshot/add-repository.asciidoc +++ b/docs/reference/troubleshooting/snapshot/add-repository.asciidoc @@ -2,8 +2,12 @@ == Troubleshooting broken repositories There are several situations where the <> might report an issue -regarding the integrity of snapshot repositories in the cluster. This page explains -the recommended actions for diagnosing corrupted, unknown, and invalid repositories. +regarding the integrity of snapshot repositories in the cluster. 
The following pages explain +the recommended actions for diagnosing corrupted, unknown, and invalid repositories: + +- <> +- <> +- <> [[diagnosing-corrupted-repositories]] === Diagnosing corrupted repositories diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java index b772e0bb347e2..d43dad87a6067 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java @@ -9,12 +9,12 @@ package org.elasticsearch.datastreams.lifecycle; import org.elasticsearch.action.datastreams.CreateDataStreamAction; +import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction; +import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.core.TimeValue; import org.elasticsearch.datastreams.DataStreamsPlugin; import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamLifecycleAction; -import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleAction; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index 9880e5e9914a8..5ebdbd272f3fe 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -27,7 +27,9 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.datastreams.ModifyDataStreamsAction; import org.elasticsearch.action.datastreams.lifecycle.ErrorEntry; +import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction; import org.elasticsearch.action.datastreams.lifecycle.ExplainIndexDataStreamLifecycle; +import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; @@ -45,8 +47,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.datastreams.DataStreamsPlugin; -import org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService; import org.elasticsearch.health.Diagnosis; import org.elasticsearch.health.GetHealthAction; diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java 
b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java index a497eed121b0c..7120196176928 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction; import org.elasticsearch.action.datastreams.lifecycle.ExplainIndexDataStreamLifecycle; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; @@ -28,7 +29,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.datastreams.DataStreamsPlugin; -import org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.plugins.Plugin; diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java deleted file mode 100644 index 557e70ba65e9b..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.datastreams.lifecycle; - -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.datastreams.DisabledSecurityDataStreamTestCase; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; - -public class DataStreamGlobalRetentionIT extends DisabledSecurityDataStreamTestCase { - - @Before - public void setup() throws IOException { - updateClusterSettings( - Settings.builder() - .put("data_streams.lifecycle.poll_interval", "1s") - .put("cluster.lifecycle.default.rollover", "min_docs=1,max_docs=1") - .build() - ); - // Create a template with the default lifecycle - Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); - putComposableIndexTemplateRequest.setJsonEntity(""" - { - "index_patterns": ["my-data-stream*"], - "data_stream": {}, - "template": { - "lifecycle": {} - } - } - """); - assertOK(client().performRequest(putComposableIndexTemplateRequest)); - - // Create a data streams with one doc - Request createDocRequest = new Request("POST", "/my-data-stream/_doc?refresh=true"); - createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}"); - assertOK(client().performRequest(createDocRequest)); - } - - @After - public void cleanUp() throws IOException { - adminClient().performRequest(new Request("DELETE", "_data_stream/*")); - } - - @SuppressWarnings("unchecked") - public void testDefaultRetention() throws Exception { - { - // Set global retention - Request request = new Request("PUT", "_data_stream/_global_retention"); - request.setJsonEntity(""" - { - "default_retention": "10s" - }"""); - assertAcknowledged(client().performRequest(request)); - } - - // Verify that the effective retention matches the default retention - { - Request request = new Request("GET", "/_data_stream/my-data-stream"); - Response response = client().performRequest(request); - List dataStreams = (List) entityAsMap(response).get("data_streams"); - assertThat(dataStreams.size(), is(1)); - Map dataStream = (Map) dataStreams.get(0); - assertThat(dataStream.get("name"), is("my-data-stream")); - Map lifecycle = (Map) dataStream.get("lifecycle"); - assertThat(lifecycle.get("effective_retention"), is("10s")); - assertThat(lifecycle.get("retention_determined_by"), is("default_global_retention")); - assertThat(lifecycle.get("data_retention"), nullValue()); - } - - // Verify that the first generation index was removed - assertBusy(() -> { - Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream")); - Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0); - assertThat(dataStream.get("name"), is("my-data-stream")); - List backingIndices = (List) dataStream.get("indices"); - assertThat(backingIndices.size(), is(1)); - // 2 backing indices created + 1 for the deleted index - assertThat(dataStream.get("generation"), is(3)); - }, 20, TimeUnit.SECONDS); - } - - @SuppressWarnings("unchecked") - public void testMaxRetention() throws Exception { - { - // Set global retention - Request request = new Request("PUT", "_data_stream/_global_retention"); - request.setJsonEntity(""" - { - "max_retention": "10s" - }"""); - assertAcknowledged(client().performRequest(request)); - } - boolean 
withDataStreamLevelRetention = randomBoolean(); - if (withDataStreamLevelRetention) { - Request request = new Request("PUT", "_data_stream/my-data-stream/_lifecycle"); - request.setJsonEntity(""" - { - "data_retention": "30d" - }"""); - assertAcknowledged(client().performRequest(request)); - } - - // Verify that the effective retention matches the max retention - { - Request request = new Request("GET", "/_data_stream/my-data-stream"); - Response response = client().performRequest(request); - List dataStreams = (List) entityAsMap(response).get("data_streams"); - assertThat(dataStreams.size(), is(1)); - Map dataStream = (Map) dataStreams.get(0); - assertThat(dataStream.get("name"), is("my-data-stream")); - Map lifecycle = (Map) dataStream.get("lifecycle"); - assertThat(lifecycle.get("effective_retention"), is("10s")); - assertThat(lifecycle.get("retention_determined_by"), is("max_global_retention")); - if (withDataStreamLevelRetention) { - assertThat(lifecycle.get("data_retention"), is("30d")); - } else { - assertThat(lifecycle.get("data_retention"), nullValue()); - } - } - - // Verify that the first generation index was removed - assertBusy(() -> { - Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream")); - Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0); - assertThat(dataStream.get("name"), is("my-data-stream")); - List backingIndices = (List) dataStream.get("indices"); - assertThat(backingIndices.size(), is(1)); - // 2 backing indices created + 1 for the deleted index - assertThat(dataStream.get("generation"), is(3)); - }, 20, TimeUnit.SECONDS); - } -} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionPermissionsRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionPermissionsRestIT.java deleted file mode 100644 index e2e82b343fc5f..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionPermissionsRestIT.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.datastreams.lifecycle; - -import org.apache.http.HttpHost; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.FeatureFlag; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.junit.ClassRule; - -import java.util.Map; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -public class DataStreamGlobalRetentionPermissionsRestIT extends ESRestTestCase { - - private static final String PASSWORD = "secret-test-password"; - - @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.DEFAULT) - .feature(FeatureFlag.FAILURE_STORE_ENABLED) - .setting("xpack.watcher.enabled", "false") - .setting("xpack.ml.enabled", "false") - .setting("xpack.security.enabled", "true") - .setting("xpack.security.transport.ssl.enabled", "false") - .setting("xpack.security.http.ssl.enabled", "false") - .user("test_admin", PASSWORD, "superuser", false) - .user("test_manage_global_retention", PASSWORD, "manage_data_stream_global_retention", false) - .user("test_monitor_global_retention", PASSWORD, "monitor_data_stream_global_retention", false) - .user("test_monitor", PASSWORD, "manage_data_stream_lifecycle", false) - .user("test_no_privilege", PASSWORD, "no_privilege", false) - .rolesFile(Resource.fromClasspath("roles.yml")) - .build(); - - @Override - protected String getTestRestCluster() { - return cluster.getHttpAddresses(); - } - - @Override - protected Settings restClientSettings() { - // If this test is running in a test framework that handles its own authorization, we don't want to overwrite it. - if (super.restClientSettings().keySet().contains(ThreadContext.PREFIX + ".Authorization")) { - return super.restClientSettings(); - } else { - // Note: This user is assigned the role "manage_data_stream_lifecycle". That role is defined in roles.yml. - String token = basicAuthHeaderValue("test_data_stream_lifecycle", new SecureString(PASSWORD.toCharArray())); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - } - - @Override - protected Settings restAdminSettings() { - // If this test is running in a test framework that handles its own authorization, we don't want to overwrite it. 
- if (super.restClientSettings().keySet().contains(ThreadContext.PREFIX + ".Authorization")) { - return super.restClientSettings(); - } else { - // Note: We use the admin user because the other one is too unprivileged, so it breaks the initialization of the test - String token = basicAuthHeaderValue("test_admin", new SecureString(PASSWORD.toCharArray())); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - } - - private Settings restManageGlobalRetentionClientSettings() { - String token = basicAuthHeaderValue("test_manage_global_retention", new SecureString(PASSWORD.toCharArray())); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - - private Settings restMonitorGlobalRetentionClientSettings() { - String token = basicAuthHeaderValue("test_monitor_global_retention", new SecureString(PASSWORD.toCharArray())); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - - private Settings restOnlyManageLifecycleClientSettings() { - String token = basicAuthHeaderValue("test_monitor", new SecureString(PASSWORD.toCharArray())); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - - private Settings restNoPrivilegeClientSettings() { - String token = basicAuthHeaderValue("test_no_privilege", new SecureString(PASSWORD.toCharArray())); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - - public void testManageGlobalRetentionPrivileges() throws Exception { - try (var client = buildClient(restManageGlobalRetentionClientSettings(), getClusterHosts().toArray(new HttpHost[0]))) { - Request request = new Request("PUT", "_data_stream/_global_retention"); - request.setJsonEntity(""" - { - "default_retention": "1d", - "max_retention": "7d" - }"""); - assertAcknowledged(client.performRequest(request)); - Map response = entityAsMap(client.performRequest(new Request("GET", "/_data_stream/_global_retention"))); - assertThat(response.get("default_retention"), equalTo("1d")); - assertThat(response.get("max_retention"), equalTo("7d")); - assertAcknowledged(client.performRequest(new Request("DELETE", "/_data_stream/_global_retention"))); - } - } - - public void testMonitorGlobalRetentionPrivileges() throws Exception { - { - Request request = new Request("PUT", "_data_stream/_global_retention"); - request.setJsonEntity(""" - { - "default_retention": "1d", - "max_retention": "7d" - }"""); - assertAcknowledged(adminClient().performRequest(request)); - } - try (var client = buildClient(restMonitorGlobalRetentionClientSettings(), getClusterHosts().toArray(new HttpHost[0]))) { - Request request = new Request("PUT", "_data_stream/_global_retention"); - request.setJsonEntity(""" - { - "default_retention": "1d", - "max_retention": "7d" - }"""); - ResponseException responseException = expectThrows(ResponseException.class, () -> client.performRequest(request)); - assertThat(responseException.getResponse().getStatusLine().getStatusCode(), is(403)); - assertThat( - responseException.getMessage(), - containsString( - "action [cluster:admin/data_stream/global_retention/put] is unauthorized for user [test_monitor_global_retention]" - ) - ); - responseException = expectThrows( - ResponseException.class, - () -> client.performRequest(new Request("DELETE", "/_data_stream/_global_retention")) - ); - assertThat(responseException.getResponse().getStatusLine().getStatusCode(), is(403)); - assertThat( - responseException.getMessage(), - 
containsString( - "action [cluster:admin/data_stream/global_retention/delete] is unauthorized for user [test_monitor_global_retention]" - ) - ); - Map response = entityAsMap(client.performRequest(new Request("GET", "/_data_stream/_global_retention"))); - assertThat(response.get("default_retention"), equalTo("1d")); - assertThat(response.get("max_retention"), equalTo("7d")); - } - } - - public void testManageLifecyclePrivileges() throws Exception { - try (var client = buildClient(restOnlyManageLifecycleClientSettings(), getClusterHosts().toArray(new HttpHost[0]))) { - Request request = new Request("PUT", "_data_stream/_global_retention"); - request.setJsonEntity(""" - { - "default_retention": "1d", - "max_retention": "7d" - }"""); - ResponseException responseException = expectThrows(ResponseException.class, () -> client.performRequest(request)); - assertThat(responseException.getResponse().getStatusLine().getStatusCode(), is(403)); - assertThat( - responseException.getMessage(), - containsString("action [cluster:admin/data_stream/global_retention/put] is unauthorized for user [test_monitor]") - ); - // This use has the monitor privilege which includes the monitor_data_stream_global_retention - Response response = client.performRequest(new Request("GET", "/_data_stream/_global_retention")); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - } - } - - public void testNoPrivileges() throws Exception { - try (var client = buildClient(restNoPrivilegeClientSettings(), getClusterHosts().toArray(new HttpHost[0]))) { - Request request = new Request("PUT", "_data_stream/_global_retention"); - request.setJsonEntity(""" - { - "default_retention": "1d", - "max_retention": "7d" - }"""); - ResponseException responseException = expectThrows(ResponseException.class, () -> client.performRequest(request)); - assertThat(responseException.getResponse().getStatusLine().getStatusCode(), is(403)); - assertThat( - responseException.getMessage(), - containsString("action [cluster:admin/data_stream/global_retention/put] is unauthorized for user [test_no_privilege]") - ); - responseException = expectThrows( - ResponseException.class, - () -> client.performRequest(new Request("DELETE", "/_data_stream/_global_retention")) - ); - assertThat(responseException.getResponse().getStatusLine().getStatusCode(), is(403)); - assertThat( - responseException.getMessage(), - containsString("action [cluster:admin/data_stream/global_retention/delete] is unauthorized for user [test_no_privilege]") - ); - responseException = expectThrows( - ResponseException.class, - () -> client.performRequest(new Request("GET", "/_data_stream/_global_retention")) - ); - assertThat(responseException.getResponse().getStatusLine().getStatusCode(), is(403)); - assertThat( - responseException.getMessage(), - containsString("action [cluster:monitor/data_stream/global_retention/get] is unauthorized for user [test_no_privilege]") - ); - } - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 53e0bc287d3ec..a1e65d7784a39 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -18,6 +18,9 @@ import org.elasticsearch.action.datastreams.MigrateToDataStreamAction; import org.elasticsearch.action.datastreams.ModifyDataStreamsAction; import 
org.elasticsearch.action.datastreams.PromoteDataStreamAction; +import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction; +import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction; +import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; @@ -44,12 +47,9 @@ import org.elasticsearch.datastreams.lifecycle.UpdateDataStreamGlobalRetentionService; import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamGlobalRetentionAction; import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamLifecycleAction; -import org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamGlobalRetentionAction; -import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamGlobalRetentionAction; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportDeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleAction; @@ -58,12 +58,9 @@ import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService; import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher; import org.elasticsearch.datastreams.lifecycle.rest.RestDataStreamLifecycleStatsAction; -import org.elasticsearch.datastreams.lifecycle.rest.RestDeleteDataStreamGlobalRetentionAction; import org.elasticsearch.datastreams.lifecycle.rest.RestDeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.rest.RestExplainDataStreamLifecycleAction; -import org.elasticsearch.datastreams.lifecycle.rest.RestGetDataStreamGlobalRetentionAction; import org.elasticsearch.datastreams.lifecycle.rest.RestGetDataStreamLifecycleAction; -import org.elasticsearch.datastreams.lifecycle.rest.RestPutDataStreamGlobalRetentionAction; import org.elasticsearch.datastreams.lifecycle.rest.RestPutDataStreamLifecycleAction; import org.elasticsearch.datastreams.rest.RestCreateDataStreamAction; import org.elasticsearch.datastreams.rest.RestDataStreamsStatsAction; @@ -290,9 +287,6 @@ public List getRestHandlers( handlers.add(new RestDeleteDataStreamLifecycleAction()); handlers.add(new RestExplainDataStreamLifecycleAction()); handlers.add(new RestDataStreamLifecycleStatsAction()); - handlers.add(new RestPutDataStreamGlobalRetentionAction()); - handlers.add(new RestGetDataStreamGlobalRetentionAction()); - handlers.add(new RestDeleteDataStreamGlobalRetentionAction()); return handlers; } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java index a5c3b092a8913..e88c023e8996d 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java +++ 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction; import org.elasticsearch.action.datastreams.lifecycle.ExplainIndexDataStreamLifecycle; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java index 3a3a54d747920..881f472b19d3c 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.datastreams.DataStreamsActionUtil; +import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterState; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportPutDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportPutDataStreamLifecycleAction.java index 7a33d3011c621..11ecf85b1ac26 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportPutDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportPutDataStreamLifecycleAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.datastreams.DataStreamsActionUtil; +import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamGlobalRetentionAction.java deleted file mode 100644 index 1ac12c918605f..0000000000000 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamGlobalRetentionAction.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.datastreams.lifecycle.rest; - -import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamGlobalRetentionAction; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.Scope; -import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; - -import java.io.IOException; -import java.util.List; - -import static org.elasticsearch.rest.RestRequest.Method.DELETE; - -/** - * Removes the data stream global retention configuration - */ -@ServerlessScope(Scope.PUBLIC) -public class RestDeleteDataStreamGlobalRetentionAction extends BaseRestHandler { - - @Override - public String getName() { - return "delete_data_stream_global_retention_action"; - } - - @Override - public List routes() { - return List.of(new Route(DELETE, "/_data_stream/_global_retention")); - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - DeleteDataStreamGlobalRetentionAction.Request request = new DeleteDataStreamGlobalRetentionAction.Request(); - request.dryRun(restRequest.paramAsBoolean("dry_run", false)); - return channel -> client.execute( - DeleteDataStreamGlobalRetentionAction.INSTANCE, - request, - new RestChunkedToXContentListener<>(channel) - ); - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamLifecycleAction.java index 22d99b67b3ff1..b624892ac6bba 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDeleteDataStreamLifecycleAction.java @@ -21,7 +21,7 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; -@ServerlessScope(Scope.PUBLIC) +@ServerlessScope(Scope.INTERNAL) public class RestDeleteDataStreamLifecycleAction extends BaseRestHandler { @Override diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java index 09f4b6efce633..d3115d6d3d3a3 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java @@ -8,10 +8,10 @@ package org.elasticsearch.datastreams.lifecycle.rest; +import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamGlobalRetentionAction.java 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamGlobalRetentionAction.java deleted file mode 100644 index cbe403af35f72..0000000000000 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamGlobalRetentionAction.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.datastreams.lifecycle.rest; - -import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamGlobalRetentionAction; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.Scope; -import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestToXContentListener; - -import java.io.IOException; -import java.util.List; - -import static org.elasticsearch.rest.RestRequest.Method.GET; - -/** - * Retrieves the data stream global retention configuration. - */ -@ServerlessScope(Scope.PUBLIC) -public class RestGetDataStreamGlobalRetentionAction extends BaseRestHandler { - - @Override - public String getName() { - return "get_data_stream_global_retention_action"; - } - - @Override - public List routes() { - return List.of(new Route(GET, "/_data_stream/_global_retention")); - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - GetDataStreamGlobalRetentionAction.Request request = new GetDataStreamGlobalRetentionAction.Request(); - request.local(restRequest.paramAsBoolean("local", request.local())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); - - return channel -> client.execute(GetDataStreamGlobalRetentionAction.INSTANCE, request, new RestToXContentListener<>(channel)); - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java index f2c514c794b32..3d802d483fd8c 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java @@ -7,10 +7,10 @@ */ package org.elasticsearch.datastreams.lifecycle.rest; +import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleAction; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamGlobalRetentionAction.java deleted file 
mode 100644 index 5331c4df16db0..0000000000000 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamGlobalRetentionAction.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.datastreams.lifecycle.rest; - -import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamGlobalRetentionAction; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.Scope; -import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.List; - -import static org.elasticsearch.rest.RestRequest.Method.PUT; - -/** - * Updates the default_retention and the max_retention of the data stream global retention configuration. It - * does not accept an empty payload. - */ -@ServerlessScope(Scope.PUBLIC) -public class RestPutDataStreamGlobalRetentionAction extends BaseRestHandler { - - @Override - public String getName() { - return "put_data_stream_global_retention_action"; - } - - @Override - public List routes() { - return List.of(new Route(PUT, "/_data_stream/_global_retention")); - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - try (XContentParser parser = restRequest.contentParser()) { - PutDataStreamGlobalRetentionAction.Request request = PutDataStreamGlobalRetentionAction.Request.parseRequest(parser); - request.dryRun(restRequest.paramAsBoolean("dry_run", false)); - return channel -> client.execute( - PutDataStreamGlobalRetentionAction.INSTANCE, - request, - new RestChunkedToXContentListener<>(channel) - ); - } - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java index d97990b46c0ba..f763c0d75ed47 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java @@ -7,10 +7,10 @@ */ package org.elasticsearch.datastreams.lifecycle.rest; +import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/10_explain_lifecycle.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/10_explain_lifecycle.yml index c79775c51c392..d03174b448ff2 
100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/10_explain_lifecycle.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/10_explain_lifecycle.yml @@ -1,9 +1,9 @@ --- "Explain backing index lifecycle": - - skip: - cluster_features: ["data_stream.lifecycle.global_retention"] - reason: "Data stream lifecycle with effective retention was released in 8.14" - features: allowed_warnings + - requires: + cluster_features: [ "gte_v8.11.0" ] + reason: "Data stream lifecycle was released as tech preview in 8.11" + test_runner_features: allowed_warnings - do: allowed_warnings: - "index template [template-with-lifecycle] has index patterns [managed-data-stream] matching patterns from existing older templates [global] with patterns (global => [*]); this template [template-with-lifecycle] will take precedence during new index creation" @@ -36,9 +36,7 @@ indices.explain_data_lifecycle: index: $backing_index - match: { indices.$backing_index.managed_by_lifecycle: true } - - match: { indices.$backing_index.lifecycle.data_retention: "30d" } - - match: { indices.$backing_index.lifecycle.effective_retention: "30d"} - - match: { indices.$backing_index.lifecycle.retention_determined_by: "data_stream_configuration"} + - match: { indices.$backing_index.lifecycle.data_retention: '30d' } - match: { indices.$backing_index.lifecycle.enabled: true } - is_false: indices.$backing_index.lifecycle.rollover @@ -48,9 +46,7 @@ index: $backing_index include_defaults: true - match: { indices.$backing_index.managed_by_lifecycle: true } - - match: { indices.$backing_index.lifecycle.data_retention: "30d" } - - match: { indices.$backing_index.lifecycle.effective_retention: "30d"} - - match: { indices.$backing_index.lifecycle.retention_determined_by: "data_stream_configuration"} + - match: { indices.$backing_index.lifecycle.data_retention: '30d' } - is_true: indices.$backing_index.lifecycle.rollover diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/190_create_data_stream_with_lifecycle.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/190_create_data_stream_with_lifecycle.yml index 745fd342d3a43..e13f245855f8c 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/190_create_data_stream_with_lifecycle.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/190_create_data_stream_with_lifecycle.yml @@ -1,8 +1,8 @@ --- "Create data stream with lifecycle": - requires: - cluster_features: ["gte_v8.14.0"] - reason: "Data stream lifecycle with effective retention was released in 8.14" + cluster_features: [ "gte_v8.11.0" ] + reason: "Data stream lifecycle was GA in 8.11" test_runner_features: allowed_warnings - do: allowed_warnings: @@ -35,7 +35,5 @@ - match: { data_streams.0.template: 'template-with-lifecycle' } - match: { data_streams.0.hidden: false } - match: { data_streams.0.lifecycle.data_retention: '30d' } - - match: { data_streams.0.lifecycle.effective_retention: '30d'} - - match: { data_streams.0.lifecycle.retention_determined_by: 'data_stream_configuration'} - match: { data_streams.0.lifecycle.enabled: true } - is_true: data_streams.0.lifecycle.rollover diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml 
b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml index ea34c6880d1f6..18aee1bf77232 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml @@ -1,8 +1,8 @@ setup: - skip: features: allowed_warnings - cluster_features: ["data_stream.lifecycle.global_retention"] - reason: "Data stream lifecycles with global retention are only supported in 8.14+" + cluster_features: ["gte_v8.11.0"] + reason: "Data stream lifecycles only supported in 8.11+" - do: allowed_warnings: - "index template [my-lifecycle] has index patterns [data-stream-with-lifecycle] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-lifecycle] will take precedence during new index creation" @@ -47,8 +47,6 @@ setup: - length: { data_streams: 1} - match: { data_streams.0.name: data-stream-with-lifecycle } - match: { data_streams.0.lifecycle.data_retention: '10d' } - - match: { data_streams.0.lifecycle.effective_retention: '10d' } - - match: { data_streams.0.lifecycle.retention_determined_by: 'data_stream_configuration' } - match: { data_streams.0.lifecycle.enabled: true} --- @@ -63,7 +61,6 @@ setup: - length: { data_streams: 1} - match: { data_streams.0.name: simple-data-stream1 } - match: { data_streams.0.lifecycle.enabled: true} - - is_false: data_streams.0.lifecycle.effective_retention --- "Put data stream lifecycle": @@ -95,7 +92,6 @@ setup: - length: { data_streams: 2 } - match: { data_streams.0.name: data-stream-with-lifecycle } - match: { data_streams.0.lifecycle.data_retention: "30d" } - - is_false: data_streams.0.lifecycle.effective_retention - match: { data_streams.0.lifecycle.enabled: false} - match: { data_streams.0.lifecycle.downsampling.0.after: '10d'} - match: { data_streams.0.lifecycle.downsampling.0.fixed_interval: '1h'} @@ -103,7 +99,6 @@ setup: - match: { data_streams.0.lifecycle.downsampling.1.fixed_interval: '10h'} - match: { data_streams.1.name: simple-data-stream1 } - match: { data_streams.1.lifecycle.data_retention: "30d" } - - is_false: data_streams.0.lifecycle.effective_retention - match: { data_streams.1.lifecycle.enabled: false} - match: { data_streams.1.lifecycle.downsampling.0.after: '10d'} - match: { data_streams.1.lifecycle.downsampling.0.fixed_interval: '1h'} @@ -129,8 +124,6 @@ setup: - match: { data_streams.0.lifecycle.enabled: true} - match: { data_streams.1.name: simple-data-stream1 } - match: { data_streams.1.lifecycle.data_retention: "30d" } - - match: { data_streams.1.lifecycle.effective_retention: "30d"} - - match: { data_streams.1.lifecycle.retention_determined_by: "data_stream_configuration"} - match: { data_streams.1.lifecycle.enabled: true} @@ -144,8 +137,6 @@ setup: - length: { data_streams: 1} - match: { data_streams.0.name: data-stream-with-lifecycle } - match: { data_streams.0.lifecycle.data_retention: "10d" } - - match: { data_streams.0.lifecycle.effective_retention: "10d"} - - match: { data_streams.0.lifecycle.retention_determined_by: "data_stream_configuration"} - is_true: data_streams.0.lifecycle.rollover --- @@ -163,8 +154,6 @@ setup: - length: { data_streams: 1 } - match: { data_streams.0.name: simple-data-stream1 } - match: { data_streams.0.lifecycle.data_retention: "30d" } - - match: { data_streams.0.lifecycle.effective_retention: "30d"} - - match: { data_streams.0.lifecycle.retention_determined_by: 
"data_stream_configuration"} - match: { data_streams.0.lifecycle.enabled: true } - do: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/30_not_found.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/30_not_found.yml index 303fbddd6c19c..24d0a5649a619 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/30_not_found.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/30_not_found.yml @@ -23,18 +23,13 @@ setup: --- "Get data stream lifecycle": - - skip: - cluster_features: ["data_stream.lifecycle.global_retention"] - reason: "Data stream lifecycle with effective retention was released in 8.14" - do: indices.get_data_lifecycle: name: "*" - length: { data_streams: 1} - match: { data_streams.0.name: my-data-stream-1 } - - match: { data_streams.0.lifecycle.data_retention: "10d" } - - match: { data_streams.0.lifecycle.effective_retention: "10d"} - - match: { data_streams.0.lifecycle.retention_determined_by: "data_stream_configuration"} + - match: { data_streams.0.lifecycle.data_retention: '10d' } - match: { data_streams.0.lifecycle.enabled: true} --- @@ -48,9 +43,7 @@ setup: --- "Put data stream lifecycle does not succeed when at lease one data stream does not exist": - - skip: - cluster_features: ["data_stream.lifecycle.global_retention"] - reason: "Data stream lifecycle with effective retention was released in 8.14" + - do: catch: missing indices.put_data_lifecycle: @@ -64,16 +57,12 @@ setup: name: "*" - length: { data_streams: 1 } - match: { data_streams.0.name: my-data-stream-1 } - - match: { data_streams.0.lifecycle.data_retention: "10d" } - - match: { data_streams.0.lifecycle.effective_retention: "10d"} - - match: { data_streams.0.lifecycle.retention_determined_by: "data_stream_configuration"} + - match: { data_streams.0.lifecycle.data_retention: '10d' } - match: { data_streams.0.lifecycle.enabled: true } --- "Delete data stream lifecycle does not succeed when at lease one data stream does not exist": - - skip: - cluster_features: ["data_stream.lifecycle.global_retention"] - reason: "Data stream lifecycle with effective retention was released in 8.14" + - do: catch: missing indices.delete_data_lifecycle: @@ -85,7 +74,5 @@ setup: name: "*" - length: { data_streams: 1 } - match: { data_streams.0.name: my-data-stream-1 } - - match: { data_streams.0.lifecycle.data_retention: "10d" } - - match: { data_streams.0.lifecycle.effective_retention: "10d"} - - match: { data_streams.0.lifecycle.retention_determined_by: "data_stream_configuration"} + - match: { data_streams.0.lifecycle.data_retention: '10d' } - match: { data_streams.0.lifecycle.enabled: true } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_global_retention.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_global_retention.yml deleted file mode 100644 index 93df045e4568e..0000000000000 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_global_retention.yml +++ /dev/null @@ -1,139 +0,0 @@ -setup: - - skip: - features: allowed_warnings - cluster_features: ["data_stream.lifecycle.global_retention"] - reason: "Global retention was added in 8.14" - - do: - allowed_warnings: - - "index template [my-lifecycle] has index patterns [my-data-stream-1] matching patterns from existing older templates [global] 
with patterns (global => [*]); this template [my-lifecycle] will take precedence during new index creation" - indices.put_index_template: - name: my-lifecycle - body: - index_patterns: [my-data-stream-*] - template: - settings: - index.number_of_replicas: 0 - lifecycle: {} - data_stream: {} - ---- -"CRUD global retention": - - do: - indices.create_data_stream: - name: my-data-stream-1 - - do: - cluster.health: - index: my-data-stream-1 - wait_for_status: green - - do: - data_streams.put_global_retention: - body: - default_retention: "7d" - max_retention: "90d" - - is_true: acknowledged - - is_false: dry_run - - match: {affected_data_streams.0.name: "my-data-stream-1"} - - match: {affected_data_streams.0.previous_effective_retention: "infinite"} - - match: {affected_data_streams.0.new_effective_retention: "7d"} - - - do: - data_streams.get_global_retention: { } - - match: { default_retention: "7d" } - - match: { max_retention: "90d" } - - - do: - data_streams.delete_global_retention: { } - - is_true: acknowledged - - is_false: dry_run - - match: { affected_data_streams.0.name: "my-data-stream-1" } - - match: { affected_data_streams.0.previous_effective_retention: "7d" } - - match: { affected_data_streams.0.new_effective_retention: "infinite" } - - - do: - data_streams.get_global_retention: { } - - is_false: default_retention - - is_false: max_retention - - - do: - indices.delete_data_stream: - name: my-data-stream-1 ---- -"Dry run global retention": - - do: - indices.create_data_stream: - name: my-data-stream-2 - - do: - indices.put_data_lifecycle: - name: "my-data-stream-2" - body: > - { - "data_retention": "90d" - } - - is_true: acknowledged - - - do: - data_streams.put_global_retention: - dry_run: true - body: - default_retention: "7d" - max_retention: "30d" - - is_false: acknowledged - - is_true: dry_run - - match: {affected_data_streams.0.name: "my-data-stream-2"} - - match: {affected_data_streams.0.previous_effective_retention: "90d"} - - match: {affected_data_streams.0.new_effective_retention: "30d"} - - - do: - indices.get_data_stream: - name: "my-data-stream-2" - include_defaults: true - - match: { data_streams.0.name: my-data-stream-2 } - - match: { data_streams.0.lifecycle.effective_retention: '90d' } - - match: { data_streams.0.lifecycle.retention_determined_by: 'data_stream_configuration' } - - do: - indices.delete_data_stream: - name: my-data-stream-2 ---- -"Default global retention is retrieved by data stream and index templates": - - do: - indices.create_data_stream: - name: my-data-stream-3 - - - do: - data_streams.put_global_retention: - body: - default_retention: "7d" - max_retention: "90d" - - is_true: acknowledged - - is_false: dry_run - - match: {affected_data_streams.0.name: "my-data-stream-3"} - - match: {affected_data_streams.0.previous_effective_retention: "infinite"} - - match: {affected_data_streams.0.new_effective_retention: "7d"} - - - do: - data_streams.get_global_retention: { } - - match: { default_retention: "7d" } - - match: { max_retention: "90d" } - - - do: - indices.get_data_stream: - name: "my-data-stream-3" - - match: { data_streams.0.name: my-data-stream-3 } - - match: { data_streams.0.lifecycle.effective_retention: '7d' } - - match: { data_streams.0.lifecycle.retention_determined_by: 'default_global_retention' } - - match: { data_streams.0.lifecycle.enabled: true } - - - do: - indices.get_index_template: - name: my-lifecycle - - - match: { index_templates.0.name: my-lifecycle } - - match: { index_templates.0.index_template.template.lifecycle.enabled: 
true } - - match: { index_templates.0.index_template.template.lifecycle.effective_retention: "7d" } - - match: { index_templates.0.index_template.template.lifecycle.retention_determined_by: "default_global_retention" } - - - do: - data_streams.delete_global_retention: { } - - do: - indices.delete_data_stream: - name: my-data-stream-3 diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index 09507ae926f44..cb17503579e32 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -749,6 +749,20 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { */ static double decodeForSyntheticSource(long scaledValue, double scalingFactor) { double v = scaledValue / scalingFactor; + + // If original double value is close to MAX_VALUE + // and rounding is performed in the direction of the same infinity + // it is possible to "overshoot" infinity during reconstruction. + // E.g. for a value close to Double.MAX_VALUE "true" scaled value is 10.5 + // and with rounding it becomes 11. + // Now, because of that rounding difference, 11 divided by scaling factor goes into infinity. + // There is nothing we can do about it so we'll return the closest finite value to infinity + // which is MAX_VALUE. + if (Double.isInfinite(v)) { + var sign = v == Double.POSITIVE_INFINITY ? 1 : -1; + return sign * Double.MAX_VALUE; + } + long reenc = Math.round(v * scalingFactor); if (reenc != scaledValue) { if (reenc > scaledValue) { diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java index d6eb55dfb23e4..253df4de999db 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java @@ -395,6 +395,12 @@ private Tuple generateValue() { private double round(double d) { long encoded = Math.round(d * scalingFactor); double decoded = encoded / scalingFactor; + // Special case due to rounding, see implementation. + if (Double.isInfinite(decoded)) { + var sign = decoded == Double.POSITIVE_INFINITY ? 1 : -1; + return sign * Double.MAX_VALUE; + } + long reencoded = Math.round(decoded * scalingFactor); if (encoded != reencoded) { if (encoded > reencoded) { @@ -406,6 +412,11 @@ private double round(double d) { } private double roundDocValues(double d) { + // Special case due to rounding, see implementation. + if (Math.abs(d) == Double.MAX_VALUE) { + return d; + } + long encoded = Math.round(d * scalingFactor); return encoded * (1 / scalingFactor); } @@ -526,7 +537,7 @@ public void testEncodeDecodeSaturatedLow() { } /** - * Tests that numbers whose encoded value is {@code Long.MIN_VALUE} can be round + * Tests that numbers whose encoded value is {@code Long.MAX_VALUE} can be round * tripped through synthetic source. */ public void testEncodeDecodeSaturatedHigh() { @@ -580,6 +591,28 @@ public void testDecodeEncode() { ); } + /** + * Tests the case when decoded value is infinite due to rounding. 
+ */ + public void testDecodeHandlingInfinity() { + for (var sign : new long[] { 1, -1 }) { + long encoded = 101; + double encodedNoRounding = 100.5; + assertEquals(encoded, Math.round(encodedNoRounding)); + + var signedMax = sign * Double.MAX_VALUE; + // We need a scaling factor that will + // 1. make the encoded long small, resulting in a significant loss of precision due to rounding + // 2. result in the long value being rounded in the correct direction. + // + // So we take a scaling factor that would put us right at MAX_VALUE + // without rounding and hence go beyond MAX_VALUE with rounding. + double scalingFactor = (encodedNoRounding / signedMax); + + assertThat(ScaledFloatFieldMapper.decodeForSyntheticSource(encoded, scalingFactor), equalTo(signedMax)); + } + } + private double encodeDecode(double value, double scalingFactor) { return ScaledFloatFieldMapper.decodeForSyntheticSource(ScaledFloatFieldMapper.encode(value, scalingFactor), scalingFactor); } diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml index a561ebbae00e9..eff9a9beb35bc 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml @@ -140,7 +140,7 @@ profile fetch: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } - length: { profile.shards.0.fetch.children: 4 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/data_streams.delete_global_retention.json b/rest-api-spec/src/main/resources/rest-api-spec/api/data_streams.delete_global_retention.json deleted file mode 100644 index 1eb4621a7b055..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/data_streams.delete_global_retention.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "data_streams.delete_global_retention":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-global-retention.html", - "description":"Deletes the global retention configuration that applies to all data streams managed by the data stream lifecycle." - }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/json"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_data_stream/_global_retention", - "methods":[ - "DELETE" - ] - } - ] - }, - "params":{ - "dry_run":{ - "type":"boolean", - "description":"Determines whether the global retention provided should be applied or only the impact should be determined.", - "default":false - }, - "master_timeout":{ - "type":"time", - "description":"Specify timeout for connection to master."
- } - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/data_streams.get_global_retention.json b/rest-api-spec/src/main/resources/rest-api-spec/api/data_streams.get_global_retention.json deleted file mode 100644 index 9084db36d7d90..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/data_streams.get_global_retention.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "data_streams.get_global_retention":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-global-retention.html", - "description":"Returns global retention configuration that applies to all data streams managed by the data stream lifecycle." - }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_data_stream/_global_retention", - "methods":[ - "GET" - ] - } - ] - }, - "params":{ - "local":{ - "type":"boolean", - "description":"Return the global retention retrieved from the node that received the request." - } - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/data_streams.put_global_retention.json b/rest-api-spec/src/main/resources/rest-api-spec/api/data_streams.put_global_retention.json deleted file mode 100644 index 9f369f4c7616d..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/data_streams.put_global_retention.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "data_streams.put_global_retention":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-global-retention.html", - "description":"Updates the global retention configuration that applies to all data streams managed by the data stream lifecycle." - }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/json"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_data_stream/_global_retention", - "methods":[ - "PUT" - ] - } - ] - }, - "params":{ - "dry_run":{ - "type":"boolean", - "description":"Determines whether the global retention provided should be applied or only the impact should be determined.", - "default":false - }, - "master_timeout":{ - "type":"time", - "description":"Specify timeout for connection to master" - } - }, - "body":{ - "description":"The global retention configuration including optional values for default and max retention.", - "required":true - } - } -} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml index af2d6f946d2ff..f698d3399f27d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml @@ -117,8 +117,8 @@ --- "Add data stream lifecycle": - requires: - cluster_features: ["data_stream.lifecycle.global_retention"] - reason: "Data stream lifecycle with global retention was available from 8.14" + cluster_features: ["gte_v8.11.0"] + reason: "Data stream lifecycle was available from 8.11" - do: cluster.put_component_template: @@ -141,14 +141,12 @@ - match: {component_templates.0.component_template.version: 1} - match: {component_templates.0.component_template.template.lifecycle.enabled: true} - match: {component_templates.0.component_template.template.lifecycle.data_retention: 
"10d"} - - match: {component_templates.0.component_template.template.lifecycle.effective_retention: "10d"} - - match: {component_templates.0.component_template.template.lifecycle.retention_determined_by: "data_stream_configuration"} --- "Get data stream lifecycle with default rollover": - requires: - cluster_features: ["data_stream.lifecycle.global_retention"] - reason: "Data stream lifecycle with effective retention was available from 8.14" + cluster_features: ["gte_v8.11.0"] + reason: "Data stream lifecycle was available from 8.11" - do: cluster.put_component_template: @@ -172,6 +170,4 @@ - match: {component_templates.0.component_template.version: 1} - match: {component_templates.0.component_template.template.lifecycle.enabled: true} - match: {component_templates.0.component_template.template.lifecycle.data_retention: "10d"} - - match: {component_templates.0.component_template.template.lifecycle.effective_retention: "10d"} - - match: {component_templates.0.component_template.template.lifecycle.retention_determined_by: "data_stream_configuration"} - is_true: component_templates.0.component_template.template.lifecycle.rollover diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml index dc3361fefab6e..2079c01079ce1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml @@ -93,8 +93,8 @@ setup: --- "Add data stream lifecycle": - skip: - cluster_features: ["data_stream.lifecycle.global_retention"] - reason: "Data stream lifecycle with effective retention was released in 8.14" + cluster_features: ["gte_v8.11.0"] + reason: "Data stream lifecycle in index templates was updated after 8.10" features: allowed_warnings - do: @@ -124,14 +124,12 @@ setup: - match: {index_templates.0.index_template.template.mappings: {properties: {field: {type: keyword}}}} - match: {index_templates.0.index_template.template.lifecycle.enabled: true} - match: {index_templates.0.index_template.template.lifecycle.data_retention: "30d"} - - match: {index_templates.0.index_template.template.lifecycle.effective_retention: "30d"} - - match: {index_templates.0.index_template.template.lifecycle.retention_determined_by: "data_stream_configuration"} --- "Get data stream lifecycle with default rollover": - skip: - cluster_features: ["data_stream.lifecycle.global_retention"] - reason: "Data stream lifecycle with effective retention was released in 8.14" + cluster_features: ["gte_v8.11.0"] + reason: "Data stream lifecycle in index templates was updated after 8.10" features: allowed_warnings - do: @@ -154,13 +152,11 @@ setup: - match: {index_templates.0.index_template.index_patterns: ["data-stream-with-lifecycle-*"]} - match: {index_templates.0.index_template.template.lifecycle.enabled: true} - match: {index_templates.0.index_template.template.lifecycle.data_retention: "30d"} - - match: {index_templates.0.index_template.template.lifecycle.effective_retention: "30d"} - - match: {index_templates.0.index_template.template.lifecycle.retention_determined_by: "data_stream_configuration"} - is_true: index_templates.0.index_template.template.lifecycle.rollover --- "Reject data stream lifecycle without data stream configuration": - - skip: + - requires: cluster_features: ["gte_v8.11.0"] reason: "Data stream lifecycle in 
index templates was updated after 8.10" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.simulate_index_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.simulate_index_template/10_basic.yml index 6790014be9951..81c8cf64169e2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.simulate_index_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.simulate_index_template/10_basic.yml @@ -227,8 +227,8 @@ --- "Simulate index template with lifecycle and include defaults": - requires: - cluster_features: ["gte_v8.14.0"] - reason: "Data stream lifecycle with effective retention was released in 8.14" + cluster_features: ["gte_v8.11.0"] + reason: "Lifecycle is only available in 8.11+" test_runner_features: ["default_shards"] - do: @@ -248,7 +248,5 @@ - match: {template.lifecycle.enabled: true} - match: {template.lifecycle.data_retention: "7d"} - - match: {template.lifecycle.effective_retention: "7d"} - - match: {template.lifecycle.retention_determined_by: "data_stream_configuration"} - is_true: template.lifecycle.rollover - match: {overlapping: []} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.simulate_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.simulate_template/10_basic.yml index ff53a762b75ef..236653b7ca9ad 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.simulate_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.simulate_template/10_basic.yml @@ -1,9 +1,9 @@ --- "Simulate template without a template in the body": - - requires: - cluster_features: ["gte_v7.9.0"] + - skip: + version: " - 7.8.99" reason: "only available in 7.9+" - test_runner_features: ["default_shards"] + features: ["default_shards"] - do: indices.put_index_template: @@ -30,10 +30,10 @@ --- "Simulate index template specifying a new template": - - requires: - cluster_features: ["gte_v7.9.0"] + - skip: + version: " - 7.8.99" reason: "only available in 7.9+" - test_runner_features: ["default_shards"] + features: ["default_shards"] - do: indices.put_index_template: @@ -84,10 +84,10 @@ --- "Simulate template matches overlapping legacy and composable templates": - - requires: - cluster_features: ["gte_v7.9.0"] + - skip: + version: " - 7.8.99" reason: "only available in 7.9+" - test_runner_features: ["allowed_warnings", "default_shards"] + features: ["allowed_warnings", "default_shards"] - do: indices.put_template: @@ -147,10 +147,10 @@ --- "Simulate replacing a template with a newer version": - - requires: - cluster_features: ["gte_v8.0.0"] + - skip: + version: " - 7.99.99" reason: "not yet backported" - test_runner_features: ["allowed_warnings", "default_shards"] + features: ["allowed_warnings", "default_shards"] - do: indices.put_index_template: @@ -202,8 +202,8 @@ --- "Simulate template with lifecycle and include defaults": - requires: - cluster_features: ["gte_v8.14.0"] - reason: "Data stream lifecycle with effective retention was released in 8.14" + cluster_features: [ "gte_v8.11.0" ] + reason: "Lifecycle is only available in 8.11+" test_runner_features: ["default_shards"] - do: @@ -223,6 +223,4 @@ - match: {template.lifecycle.enabled: true} - match: {template.lifecycle.data_retention: "7d"} - - match: {template.lifecycle.effective_retention: "7d"} - - match: {template.lifecycle.retention_determined_by: 
"data_stream_configuration"} - is_true: template.lifecycle.rollover diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml index 200f7292291b1..817c62dbdd12d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml @@ -22,8 +22,8 @@ setup: --- fetch fields: - skip: - version: ' - 8.13.99' - reason: fetch fields and stored_fields using ValueFetcher + version: ' - 8.14.99' + reason: _ignored is returned only from 8.15 on - do: search: @@ -41,7 +41,7 @@ fetch fields: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } - length: { profile.shards.0.fetch.children: 2 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } @@ -57,8 +57,8 @@ fetch fields: --- fetch source: - skip: - version: ' - 8.13.99' - reason: fetch fields and stored_fields using ValueFetcher + version: ' - 8.14.99' + reason: _ignored is returned only from 8.15 on - do: search: @@ -74,7 +74,7 @@ fetch source: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } - length: { profile.shards.0.fetch.children: 3 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - match: { profile.shards.0.fetch.children.1.type: FetchSourcePhase } @@ -88,8 +88,8 @@ fetch source: --- fetch nested source: - skip: - version: ' - 8.13.99' - reason: fetch fields and stored_fields using ValueFetcher + version: ' - 8.14.99' + reason: _ignored is returned only from 8.15 on - do: indices.create: @@ -139,7 +139,7 @@ fetch nested source: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } - length: { profile.shards.0.fetch.children: 4 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - match: { profile.shards.0.fetch.children.1.type: FetchSourcePhase } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java index b8d1d45a6f85d..9e0dd984c9a2a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java @@ -168,4 +168,18 @@ public void testInvalid() { assertThat(exc.getMessage(), equalTo("cannot combine _none_ with other fields")); } } + + public void 
testFetchId() { + assertAcked(prepareCreate("test")); + ensureGreen(); + + prepareIndex("test").setId("1").setSource("field", "value").get(); + refresh(); + + assertResponse(prepareSearch("test").addFetchField("_id"), response -> { + assertEquals(1, response.getHits().getHits().length); + assertEquals("1", response.getHits().getAt(0).getId()); + assertEquals("1", response.getHits().getAt(0).field("_id").getValue()); + }); + } } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 88a1049a42557..ab7b26570a665 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -173,7 +173,8 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_13_2 = new Version(8_13_02_99); public static final Version V_8_13_3 = new Version(8_13_03_99); public static final Version V_8_14_0 = new Version(8_14_00_99); - public static final Version CURRENT = V_8_14_0; + public static final Version V_8_15_0 = new Version(8_15_00_99); + public static final Version CURRENT = V_8_15_0; private static final NavigableMap VERSION_IDS; private static final Map VERSION_STRINGS; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index d352f1be5e65a..3bf9c3715b29a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -191,14 +190,13 @@ public int hashCode() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - Params withEffectiveRetentionParams = new DelegatingMapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS, params); builder.startObject(); builder.startArray(COMPONENT_TEMPLATES.getPreferredName()); for (Map.Entry componentTemplate : this.componentTemplates.entrySet()) { builder.startObject(); builder.field(NAME.getPreferredName(), componentTemplate.getKey()); builder.field(COMPONENT_TEMPLATE.getPreferredName()); - componentTemplate.getValue().toXContent(builder, withEffectiveRetentionParams, rolloverConfiguration, globalRetention); + componentTemplate.getValue().toXContent(builder, params, rolloverConfiguration, globalRetention); builder.endObject(); } builder.endArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index 668e3f8e7c10f..240fdd2ae8199 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -16,7 +16,6 @@ import 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java
index d352f1be5e65a..3bf9c3715b29a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java
@@ -16,7 +16,6 @@
 import org.elasticsearch.action.support.master.MasterNodeReadRequest;
 import org.elasticsearch.cluster.metadata.ComponentTemplate;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
-import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -191,14 +190,13 @@ public int hashCode() {

     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        Params withEffectiveRetentionParams = new DelegatingMapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS, params);
         builder.startObject();
         builder.startArray(COMPONENT_TEMPLATES.getPreferredName());
         for (Map.Entry<String, ComponentTemplate> componentTemplate : this.componentTemplates.entrySet()) {
             builder.startObject();
             builder.field(NAME.getPreferredName(), componentTemplate.getKey());
             builder.field(COMPONENT_TEMPLATE.getPreferredName());
-            componentTemplate.getValue().toXContent(builder, withEffectiveRetentionParams, rolloverConfiguration, globalRetention);
+            componentTemplate.getValue().toXContent(builder, params, rolloverConfiguration, globalRetention);
             builder.endObject();
         }
         builder.endArray();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java
index 668e3f8e7c10f..240fdd2ae8199 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java
@@ -16,7 +16,6 @@
 import org.elasticsearch.action.support.master.MasterNodeReadRequest;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
-import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -190,14 +189,13 @@ public int hashCode() {

     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        Params withEffectiveRetentionParams = new DelegatingMapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS, params);
         builder.startObject();
         builder.startArray(INDEX_TEMPLATES.getPreferredName());
         for (Map.Entry<String, ComposableIndexTemplate> indexTemplate : this.indexTemplates.entrySet()) {
             builder.startObject();
             builder.field(NAME.getPreferredName(), indexTemplate.getKey());
             builder.field(INDEX_TEMPLATE.getPreferredName());
-            indexTemplate.getValue().toXContent(builder, withEffectiveRetentionParams, rolloverConfiguration, globalRetention);
+            indexTemplate.getValue().toXContent(builder, params, rolloverConfiguration, globalRetention);
             builder.endObject();
         }
         builder.endArray();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
index 378df2d7d53e7..4ff38222ccc99 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
@@ -12,7 +12,6 @@
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
-import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.cluster.metadata.Template;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -114,11 +113,10 @@ public void writeTo(StreamOutput out) throws IOException {

     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        Params withEffectiveRetentionParams = new DelegatingMapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS, params);
         builder.startObject();
         if (this.resolvedTemplate != null) {
             builder.field(TEMPLATE.getPreferredName());
-            this.resolvedTemplate.toXContent(builder, withEffectiveRetentionParams, rolloverConfiguration, globalRetention);
+            this.resolvedTemplate.toXContent(builder, params, rolloverConfiguration, globalRetention);
         }
         if (this.overlappingTemplates != null) {
             builder.startArray(OVERLAPPING.getPreferredName());
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
index 1a2103d665b38..f2a581472303b 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
@@ -20,7 +20,6 @@
 import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.DataStreamAutoShardingEvent;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
-import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -544,11 +543,10 @@ public void writeTo(StreamOutput out) throws IOException {

         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-            Params withEffectiveRetentionParams = new DelegatingMapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS, params);
             builder.startObject();
             builder.startArray(DATA_STREAMS_FIELD.getPreferredName());
             for (DataStreamInfo dataStream : dataStreams) {
-                dataStream.toXContent(builder, withEffectiveRetentionParams, rolloverConfiguration, globalRetention);
+                dataStream.toXContent(builder, params, rolloverConfiguration, globalRetention);
             }
             builder.endArray();
             builder.endObject();
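Note: the toXContent changes above (component template, composable template, simulate response, get data stream) all drop the same forced wrapper, so effective-retention rendering becomes an opt-in decision for the caller instead of being baked into every response. A hedged sketch of how a call site could still request it, reusing only the names visible in the removed lines:

import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
import org.elasticsearch.xcontent.ToXContent;

// Illustrative only: re-creating the removed opt-in at a single call site.
class EffectiveRetentionParamsSketch {
    static ToXContent.Params withEffectiveRetention(ToXContent.Params params) {
        return new ToXContent.DelegatingMapParams(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS, params);
    }
}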
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/ExplainDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java
similarity index 91%
rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/ExplainDataStreamLifecycleAction.java
rename to server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java
index 5bfdf2d382005..ee4f7fbaa9c59 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/ExplainDataStreamLifecycleAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java
@@ -6,7 +6,7 @@
  * Side Public License, v 1.
  */

-package org.elasticsearch.datastreams.lifecycle.action;
+package org.elasticsearch.action.datastreams.lifecycle;

 import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
@@ -14,11 +14,9 @@
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration;
-import org.elasticsearch.action.datastreams.lifecycle.ExplainIndexDataStreamLifecycle;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.master.MasterNodeReadRequest;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
-import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -38,9 +36,7 @@
  */
 public class ExplainDataStreamLifecycleAction {

-    public static final ActionType<Response> INSTANCE = new ActionType<>(
-        "indices:admin/data_stream/lifecycle/explain"
-    );
+    public static final ActionType<Response> INSTANCE = new ActionType<>("indices:admin/data_stream/lifecycle/explain");

     private ExplainDataStreamLifecycleAction() {/* no instances */}

@@ -216,11 +212,7 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params outerP
             return builder;
         }), Iterators.map(indices.iterator(), explainIndexDataLifecycle -> (builder, params) -> {
             builder.field(explainIndexDataLifecycle.getIndex());
-            ToXContent.Params withEffectiveRetentionParams = new ToXContent.DelegatingMapParams(
-                DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS,
-                params
-            );
-            explainIndexDataLifecycle.toXContent(builder, withEffectiveRetentionParams, rolloverConfiguration, globalRetention);
+            explainIndexDataLifecycle.toXContent(builder, params, rolloverConfiguration, globalRetention);
             return builder;
         }), Iterators.single((builder, params) -> {
             builder.endObject();
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainIndexDataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainIndexDataStreamLifecycle.java
index 2b79377fb71e0..bb6c3f90f1b0a 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainIndexDataStreamLifecycle.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainIndexDataStreamLifecycle.java
@@ -132,11 +132,7 @@ public XContentBuilder toXContent(
         }
         if (this.lifecycle != null) {
             builder.field(LIFECYCLE_FIELD.getPreferredName());
-            Params withEffectiveRetentionParams = new DelegatingMapParams(
-                DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS,
-                params
-            );
-            lifecycle.toXContent(builder, withEffectiveRetentionParams, rolloverConfiguration, globalRetention);
+            lifecycle.toXContent(builder, params, rolloverConfiguration, globalRetention);
         }
         if (this.error != null) {
             if (error.firstOccurrenceTimestamp() != -1L && error.recordedTimestamp() != -1L && error.retryCount() != -1) {
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java
similarity index 90%
rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleAction.java
rename to server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java
index 79e1b71771559..d0dd67b4b4db5 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java
@@ -5,7 +5,7 @@
  * in compliance with, at your election, the Elastic License 2.0 or the Server
  * Side Public License, v 1.
  */
-package org.elasticsearch.datastreams.lifecycle.action;
+package org.elasticsearch.action.datastreams.lifecycle;

 import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
@@ -38,9 +38,7 @@
  */
 public class GetDataStreamLifecycleAction {

-    public static final ActionType<Response> INSTANCE = new ActionType<>(
-        "indices:admin/data_stream/lifecycle/get"
-    );
+    public static final ActionType<Response> INSTANCE = new ActionType<>("indices:admin/data_stream/lifecycle/get");

     private GetDataStreamLifecycleAction() {/* no instances */}

@@ -205,7 +203,7 @@ public Response(
         public Response(StreamInput in) throws IOException {
             this(
-                in.readCollectionAsList(Response.DataStreamLifecycle::new),
+                in.readCollectionAsList(DataStreamLifecycle::new),
                 in.readOptionalWriteable(RolloverConfiguration::new),
                 in.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION)
                     ? in.readOptionalWriteable(DataStreamGlobalRetention::read)
@@ -237,17 +235,22 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params outerParams) {
             builder.startObject();
             builder.startArray(DATA_STREAMS_FIELD.getPreferredName());
             return builder;
-        }), Iterators.map(dataStreamLifecycles.iterator(), dataStreamLifecycle -> (builder, params) -> {
-            ToXContent.Params withEffectiveRetentionParams = new ToXContent.DelegatingMapParams(
-                org.elasticsearch.cluster.metadata.DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAMS,
-                params
-            );
-            return dataStreamLifecycle.toXContent(builder, withEffectiveRetentionParams, rolloverConfiguration, globalRetention);
-        }), Iterators.single((builder, params) -> {
-            builder.endArray();
-            builder.endObject();
-            return builder;
-        }));
+        }),
+            Iterators.map(
+                dataStreamLifecycles.iterator(),
+                dataStreamLifecycle -> (builder, params) -> dataStreamLifecycle.toXContent(
+                    builder,
+                    params,
+                    rolloverConfiguration,
+                    globalRetention
+                )
+            ),
+            Iterators.single((builder, params) -> {
+                builder.endArray();
+                builder.endObject();
+                return builder;
+            })
+        );
     }

     @Override
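Note: the reworked toXContentChunked keeps the usual three-part chunked-response shape: one header chunk, one chunk per element, one footer chunk. A generic sketch of that shape; Item, items and render are hypothetical stand-ins, not Elasticsearch API:

// Illustrative only: header chunk, per-element chunks, footer chunk.
Iterator<ToXContent> chunks(List<Item> items) {
    return Iterators.concat(
        Iterators.single((builder, params) -> builder.startObject().startArray("data_streams")),
        Iterators.map(items.iterator(), item -> (builder, params) -> render(item, builder, params)),
        Iterators.single((builder, params) -> builder.endArray().endObject())
    );
}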
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java
similarity index 97%
rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java
rename to server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java
index c40988f1de6c7..8156e03b0cdd1 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java
@@ -6,7 +6,7 @@
  * Side Public License, v 1.
  */

-package org.elasticsearch.datastreams.lifecycle.action;
+package org.elasticsearch.action.datastreams.lifecycle;

 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.IndicesRequest;
@@ -14,7 +14,6 @@
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
-import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -32,6 +31,7 @@

 import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DATA_RETENTION_FIELD;
 import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DOWNSAMPLING_FIELD;
+import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling;
 import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.ENABLED_FIELD;

 /**
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java
index 95b0d23b564a2..85de123de3145 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java
@@ -441,7 +441,7 @@ private boolean isIgnored(RoutingNodes routingNodes, ShardRouting shard, ShardAs
     private void moveShards() {
         // Iterate over all started shards and check if they can remain. In the presence of throttling shard movements,
         // the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are offloading the shards.
-        for (final var iterator = OrderedShardsIterator.create(routingNodes, moveOrdering); iterator.hasNext();) {
+        for (final var iterator = OrderedShardsIterator.createForNecessaryMoves(allocation, moveOrdering); iterator.hasNext();) {
             final var shardRouting = iterator.next();

             if (shardRouting.started() == false) {
@@ -500,7 +500,7 @@ private void balance() {
         // Iterate over all started shards and try to move any which are on undesired nodes. In the presence of throttling shard
         // movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are offloading the
         // shards.
-        for (final var iterator = OrderedShardsIterator.create(routingNodes, moveOrdering); iterator.hasNext();) {
+        for (final var iterator = OrderedShardsIterator.createForBalancing(allocation, moveOrdering); iterator.hasNext();) {
             final var shardRouting = iterator.next();

             totalAllocations++;
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/OrderedShardsIterator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/OrderedShardsIterator.java
index efe59d98ba5df..d697686f9258b 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/OrderedShardsIterator.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/OrderedShardsIterator.java
@@ -8,34 +8,71 @@

 package org.elasticsearch.cluster.routing.allocation.allocator;

+import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.collect.Iterators;

 import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.Comparator;
 import java.util.Iterator;
 import java.util.NoSuchElementException;
 import java.util.Objects;

 /**
- * This class iterates all shards from all nodes in order of allocation recency.
- * Shards from the node that had a new shard allocation would appear in the end of iteration.
+ * This class iterates all shards from all nodes.
+ * The shard order is defined by:
+ * (1) allocation recency: shards from the node that most recently received a new shard allocation appear at the end of the iteration;
+ * (2) shard priority: for necessary moves, data stream write shards come first, then regular index shards, then the rest;
+ *     for rebalancing, the order is inverted.
  */
public class OrderedShardsIterator implements Iterator<ShardRouting> {

     private final ArrayDeque<NodeAndShardIterator> queue;

-    public static OrderedShardsIterator create(RoutingNodes routingNodes, NodeAllocationOrdering ordering) {
+    public static OrderedShardsIterator createForNecessaryMoves(RoutingAllocation allocation, NodeAllocationOrdering ordering) {
+        return create(allocation.routingNodes(), createShardsComparator(allocation.metadata()), ordering);
+    }
+
+    public static OrderedShardsIterator createForBalancing(RoutingAllocation allocation, NodeAllocationOrdering ordering) {
+        return create(allocation.routingNodes(), createShardsComparator(allocation.metadata()).reversed(), ordering);
+    }
+
+    private static OrderedShardsIterator create(
+        RoutingNodes routingNodes,
+        Comparator<ShardRouting> shardOrder,
+        NodeAllocationOrdering nodeOrder
+    ) {
         var queue = new ArrayDeque<NodeAndShardIterator>(routingNodes.size());
-        for (var nodeId : ordering.sort(routingNodes.getAllNodeIds())) {
+        for (var nodeId : nodeOrder.sort(routingNodes.getAllNodeIds())) {
             var node = routingNodes.node(nodeId);
             if (node.size() > 0) {
-                queue.add(new NodeAndShardIterator(nodeId, Iterators.forArray(node.copyShards())));
+                queue.add(new NodeAndShardIterator(nodeId, sort(shardOrder, node.copyShards())));
             }
         }
         return new OrderedShardsIterator(queue);
     }

+    private static Iterator<ShardRouting> sort(Comparator<ShardRouting> comparator, ShardRouting[] shards) {
+        Arrays.sort(shards, comparator);
+        return Iterators.forArray(shards);
+    }
+
+    private static Comparator<ShardRouting> createShardsComparator(Metadata metadata) {
+        return Comparator.comparing(shard -> {
+            var lookup = metadata.getIndicesLookup().get(shard.getIndexName());
+            if (lookup != null && lookup.getParentDataStream() != null) {
+                // prioritize write indices of the data stream
+                return Objects.equals(lookup.getParentDataStream().getWriteIndex(), shard.index()) ? 0 : 2;
+            } else {
+                // regular index
+                return 1;
+            }
+        });
+    }
+
     private OrderedShardsIterator(ArrayDeque<NodeAndShardIterator> queue) {
         this.queue = queue;
     }
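Note: createShardsComparator ranks each shard 0 (data stream write index), 1 (regular index) or 2 (non-write backing index), and createForBalancing simply reverses the comparator, which is why write shards move first during necessary moves and last during rebalancing. A self-contained sketch of the same ordering, with the rank values hard-coded for illustration:

import java.util.Comparator;
import java.util.List;

// Illustrative only: reproduces the rank-then-reverse ordering outside Elasticsearch.
class ShardPrioritySketch {
    record Shard(String index, int rank) {} // 0 = write index, 1 = regular, 2 = other backing index

    public static void main(String[] args) {
        Comparator<Shard> necessaryMoves = Comparator.comparingInt(Shard::rank);
        List<Shard> shards = List.of(new Shard("ds-write", 0), new Shard("regular", 1), new Shard("ds-old", 2));
        System.out.println(shards.stream().sorted(necessaryMoves).toList());            // write index first
        System.out.println(shards.stream().sorted(necessaryMoves.reversed()).toList()); // write index last
    }
}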
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
index 2fa3e903a0074..4b5c647da0c9a 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
@@ -112,9 +112,13 @@ private SearchHits buildSearchHits(SearchContext context, int[] docIdsToLoad, Pr
         context.getSearchExecutionContext().setLookupProviders(sourceProvider, ctx -> fieldLookupProvider);

         List<FetchSubPhaseProcessor> processors = getProcessors(context.shardTarget(), fetchContext, profiler);
-
         StoredFieldsSpec storedFieldsSpec = StoredFieldsSpec.build(processors, FetchSubPhaseProcessor::storedFieldsSpec);
         storedFieldsSpec = storedFieldsSpec.merge(new StoredFieldsSpec(false, false, sourceLoader.requiredStoredFields()));
+        // Ideally the required stored fields would be provided as constructor argument a few lines above, but that requires moving
+        // the getProcessors call to before the setLookupProviders call, which causes weird issues in InnerHitsPhase.
+        // setLookupProviders resets the SearchLookup used throughout the rest of the fetch phase, which StoredValueFetchers rely on
+        // to retrieve stored fields, and InnerHitsPhase is the last sub-fetch phase and re-runs the entire fetch phase.
+        fieldLookupProvider.setPreloadedStoredFieldNames(storedFieldsSpec.requiredStoredFields());

         StoredFieldLoader storedFieldLoader = profiler.storedFields(StoredFieldLoader.fromSpec(storedFieldsSpec));
         IdLoader idLoader = context.newIdLoader();
@@ -164,7 +168,7 @@ protected SearchHit nextDoc(int doc) throws IOException {
                     leafIdLoader
                 );
                 sourceProvider.source = hit.source();
-                fieldLookupProvider.storedFields = hit.loadedFields();
+                fieldLookupProvider.setPreloadedStoredFieldValues(hit.hit().getId(), hit.loadedFields());
                 for (FetchSubPhaseProcessor processor : processors) {
                     processor.process(hit);
                 }
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/PreloadedFieldLookupProvider.java b/server/src/main/java/org/elasticsearch/search/fetch/PreloadedFieldLookupProvider.java
index 31cd74c878a0f..b335ce4aa2800 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/PreloadedFieldLookupProvider.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/PreloadedFieldLookupProvider.java
@@ -9,12 +9,16 @@
 package org.elasticsearch.search.fetch;

 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.search.lookup.FieldLookup;
 import org.elasticsearch.search.lookup.LeafFieldLookupProvider;

 import java.io.IOException;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.function.Supplier;

 /**
@@ -26,15 +30,22 @@
  */
 class PreloadedFieldLookupProvider implements LeafFieldLookupProvider {

-    Map<String, List<Object>> storedFields;
-    LeafFieldLookupProvider backUpLoader;
-    Supplier<LeafFieldLookupProvider> loaderSupplier;
+    private final SetOnce<Set<String>> preloadedStoredFieldNames = new SetOnce<>();
+    private Map<String, List<Object>> preloadedStoredFieldValues;
+    private String id;
+    private LeafFieldLookupProvider backUpLoader;
+    private Supplier<LeafFieldLookupProvider> loaderSupplier;

     @Override
     public void populateFieldLookup(FieldLookup fieldLookup, int doc) throws IOException {
         String field = fieldLookup.fieldType().name();
-        if (storedFields.containsKey(field)) {
-            fieldLookup.setValues(storedFields.get(field));
+
+        if (field.equals(IdFieldMapper.NAME)) {
+            fieldLookup.setValues(Collections.singletonList(id));
+            return;
+        }
+        if (preloadedStoredFieldNames.get().contains(field)) {
+            fieldLookup.setValues(preloadedStoredFieldValues.get(field));
             return;
         }
         // stored field not preloaded, go and get it directly
@@ -44,8 +55,26 @@ public void populateFieldLookup(FieldLookup fieldLookup, int doc) throws IOExcep
         backUpLoader.populateFieldLookup(fieldLookup, doc);
     }

+    void setPreloadedStoredFieldNames(Set<String> preloadedStoredFieldNames) {
+        this.preloadedStoredFieldNames.set(preloadedStoredFieldNames);
+    }
+
+    void setPreloadedStoredFieldValues(String id, Map<String, List<Object>> preloadedStoredFieldValues) {
+        assert preloadedStoredFieldNames.get().containsAll(preloadedStoredFieldValues.keySet())
+            : "Provided stored field that was not expected to be preloaded? "
+                + preloadedStoredFieldValues.keySet()
+                + " - "
+                + preloadedStoredFieldNames;
+        this.preloadedStoredFieldValues = preloadedStoredFieldValues;
+        this.id = id;
+    }
+
     void setNextReader(LeafReaderContext ctx) {
         backUpLoader = null;
         loaderSupplier = () -> LeafFieldLookupProvider.fromStoredFields().apply(ctx);
     }
+
+    LeafFieldLookupProvider getBackUpLoader() {
+        return backUpLoader;
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java
index 882eb1cf9c75b..287c47505bf3a 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java
@@ -40,6 +40,7 @@ public final class FetchFieldsPhase implements FetchSubPhase {
     private static final List<FieldAndFormat> DEFAULT_METADATA_FIELDS = List.of(
         new FieldAndFormat(IgnoredFieldMapper.NAME, null),
         new FieldAndFormat(RoutingFieldMapper.NAME, null),
+        // will only be fetched when mapped (older archived indices)
         new FieldAndFormat(LegacyTypeFieldMapper.NAME, null)
     );

@@ -95,9 +96,9 @@ public void setNextReader(LeafReaderContext readerContext) {
     @Override
     public StoredFieldsSpec storedFieldsSpec() {
         if (fieldFetcher != null) {
-            return fieldFetcher.storedFieldsSpec();
+            return metadataFieldFetcher.storedFieldsSpec().merge(fieldFetcher.storedFieldsSpec());
         }
-        return StoredFieldsSpec.NO_REQUIREMENTS;
+        return metadataFieldFetcher.storedFieldsSpec();
     }

     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java
index fa4eb8f21f78c..97ff1862c7852 100644
--- a/server/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java
+++ b/server/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java
@@ -31,7 +31,9 @@ public MappedFieldType fieldType() {
      */
     public void setValues(List<Object> values) {
         assert valuesLoaded == false : "Call clear() before calling setValues()";
-        values.stream().map(fieldType::valueForDisplay).forEach(this.values::add);
+        if (values != null) {
+            values.stream().map(fieldType::valueForDisplay).forEach(this.values::add);
+        }
         this.valuesLoaded = true;
     }
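Note: after these fetch-phase changes a field lookup is served from one of three places, in order: _id from the current hit, a preloaded stored-field value from the bulk load, or a lazily created per-segment fallback reader. A compact sketch of that decision order; the enum and method are illustrative stand-ins, not Elasticsearch API:

import java.util.Set;

// Illustrative only: the three-way decision made by populateFieldLookup above.
class LookupOrderSketch {
    enum Source { FROM_HIT_ID, PRELOADED, FALLBACK_READER }

    static Source sourceFor(String field, Set<String> preloadedNames) {
        if (field.equals("_id")) {
            return Source.FROM_HIT_ID;       // served from the hit id, never from stored fields
        }
        if (preloadedNames.contains(field)) {
            return Source.PRELOADED;         // value may be null for this doc; FieldLookup now tolerates that
        }
        return Source.FALLBACK_READER;       // per-segment reader created on demand
    }
}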
diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json
index ead7387b0e1ac..503f02b25eb8d 100644
--- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json
+++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json
@@ -4,7 +4,7 @@
   "UNSTABLE_CLUSTER_TROUBLESHOOTING": "troubleshooting-unstable-cluster.html",
   "LAGGING_NODE_TROUBLESHOOTING": "troubleshooting-unstable-cluster.html#_diagnosing_lagging_nodes_2",
   "SHARD_LOCK_TROUBLESHOOTING": "troubleshooting-unstable-cluster.html#_diagnosing_shardlockobtainfailedexception_failures_2",
-  "CONCURRENT_REPOSITORY_WRITERS": "add-repository.html",
+  "CONCURRENT_REPOSITORY_WRITERS": "diagnosing-corrupted-repositories.html",
   "ARCHIVE_INDICES": "archive-indices.html",
   "HTTP_TRACER": "modules-network.html#http-rest-request-tracer",
   "LOGGING": "logging.html",
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java
index 025f51b7df997..d31c9fddf2712 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java
@@ -34,7 +34,6 @@
 import static org.elasticsearch.cluster.metadata.ComponentTemplateTests.randomSettings;
 import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS;
 import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.not;

 public class GetComponentTemplateResponseTests extends AbstractWireSerializingTestCase<GetComponentTemplateAction.Response> {
     @Override
@@ -103,9 +102,6 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws
                 .keySet()) {
                 assertThat(serialized, containsString(label));
             }
-            // We check that even if there was no retention provided by the user, the global retention applies
-            assertThat(serialized, not(containsString("data_retention")));
-            assertThat(serialized, containsString("effective_retention"));
         }
     }
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/ExplainDataStreamLifecycleResponseTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleResponseTests.java
similarity index 91%
rename from modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/ExplainDataStreamLifecycleResponseTests.java
rename to server/src/test/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleResponseTests.java
index 462c0626c6296..a47eca7692842 100644
--- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/ExplainDataStreamLifecycleResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleResponseTests.java
@@ -6,14 +6,12 @@
  * Side Public License, v 1.
  */

-package org.elasticsearch.datastreams.lifecycle.action;
+package org.elasticsearch.action.datastreams.lifecycle;

 import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardDocsCondition;
 import org.elasticsearch.action.admin.indices.rollover.MinPrimaryShardDocsCondition;
 import org.elasticsearch.action.admin.indices.rollover.RolloverConditions;
 import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration;
-import org.elasticsearch.action.datastreams.lifecycle.ErrorEntry;
-import org.elasticsearch.action.datastreams.lifecycle.ExplainIndexDataStreamLifecycle;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
 import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
@@ -35,9 +33,8 @@
 import java.util.List;
 import java.util.Map;

-import static org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction.Response;
+import static org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction.Response;
 import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS;
-import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;

@@ -196,23 +193,6 @@ public void testToXContent() throws IOException {
             Map<String, Object> lifecycleMap = (Map<String, Object>) explainIndexMap.get("lifecycle");
             assertThat(lifecycleMap.get("data_retention"), nullValue());

-            if (response.getGlobalRetention() == null) {
-                assertThat(lifecycleMap.get("effective_retention"), nullValue());
-                assertThat(lifecycleMap.get("retention_determined_by"), nullValue());
-            } else if (response.getGlobalRetention().getDefaultRetention() != null) {
-                assertThat(
-                    lifecycleMap.get("effective_retention"),
-                    equalTo(response.getGlobalRetention().getDefaultRetention().getStringRep())
-                );
-                assertThat(lifecycleMap.get("retention_determined_by"), equalTo("default_global_retention"));
-            } else {
-                assertThat(
-                    lifecycleMap.get("effective_retention"),
-                    equalTo(response.getGlobalRetention().getMaxRetention().getStringRep())
-                );
-                assertThat(lifecycleMap.get("retention_determined_by"), equalTo("max_global_retention"));
-            }
-
             Map<String, Object> lifecycleRollover = (Map<String, Object>) lifecycleMap.get("rollover");
             assertThat(lifecycleRollover.get("min_primary_shard_docs"), is(4));
             assertThat(lifecycleRollover.get("max_primary_shard_docs"), is(9));
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/OrderedShardsIteratorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/OrderedShardsIteratorTests.java
index f9ef743dc7fad..bc0590625d290 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/OrderedShardsIteratorTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/OrderedShardsIteratorTests.java
@@ -8,14 +8,23 @@

 package org.elasticsearch.cluster.routing.allocation.allocator;

+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ESAllocationTestCase;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
-import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.snapshots.SnapshotShardSizeInfo;
 import org.hamcrest.Description;
 import org.hamcrest.TypeSafeMatcher;

@@ -28,6 +37,7 @@
 import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;
@@ -49,7 +59,10 @@ public void testOrdersShardsAccordingToAllocationRecency() {
         var ordering = new NodeAllocationOrdering();
         ordering.recordAllocation("node-1");

-        var iterator = createOrderedShardsIterator(nodes, routing, ordering);
+        var iterator = OrderedShardsIterator.createForNecessaryMoves(
+            createRoutingAllocation(nodes, Metadata.EMPTY_METADATA, routing),
+            ordering
+        );

         // order within same priority is not defined
         // no recorded allocations first
@@ -81,7 +94,10 @@ public void testReOrdersShardDuringIteration() {
         ordering.recordAllocation("node-3");
         ordering.recordAllocation("node-2");

-        var iterator = createOrderedShardsIterator(nodes, routing, ordering);
+        var iterator = OrderedShardsIterator.createForNecessaryMoves(
+            createRoutingAllocation(nodes, Metadata.EMPTY_METADATA, routing),
+            ordering
+        );

         var first = iterator.next();
         assertThat(first, anyOf(isIndexShardAt("index-1a", "node-1"), isIndexShardAt("index-1b", "node-1")));
@@ -93,13 +109,76 @@ public void testReOrdersShardDuringIteration() {
         assertThat(iterator.hasNext(), equalTo(false));
     }

-    private OrderedShardsIterator createOrderedShardsIterator(DiscoveryNodes nodes, RoutingTable routing, NodeAllocationOrdering ordering) {
-        var routingNodes = randomBoolean() ? RoutingNodes.mutable(routing, nodes) : RoutingNodes.immutable(routing, nodes);
-        return OrderedShardsIterator.create(routingNodes, ordering);
+    public void testShouldOrderShardByPriority() {
+
+        var nodes = DiscoveryNodes.builder().add(newNode("node-1")).add(newNode("node-2")).build();
+
+        IndexMetadata lookup = IndexMetadata.builder("lookup").settings(indexSettings(IndexVersion.current(), 1, 0)).build();
+        IndexMetadata ds1 = IndexMetadata.builder(".ds-data-stream-2024.04.18-000001")
+            .settings(indexSettings(IndexVersion.current(), 1, 0))
+            .build();
+        IndexMetadata ds2 = IndexMetadata.builder(".ds-data-stream-2024.04.18-000002")
+            .settings(indexSettings(IndexVersion.current(), 1, 0))
+            .build();
+
+        var metadata = Metadata.builder()
+            .put(lookup, false)
+            .put(ds1, false)
+            .put(ds2, false)
+            .put(DataStream.builder("data-stream", List.of(ds1.getIndex(), ds2.getIndex())).build())
+            .build();
+
+        var routing = RoutingTable.builder()
+            .add(index(lookup.getIndex(), "node-1"))
+            .add(index(ds1.getIndex(), "node-1"))
+            .add(index(ds2.getIndex(), "node-1"))
+            .build();
+
+        // when performing necessary moves (such as preparation for the node shutdown) write shards should be moved first
+        assertThat(
+            next(
+                3,
+                OrderedShardsIterator.createForNecessaryMoves(
+                    createRoutingAllocation(nodes, metadata, routing),
+                    new NodeAllocationOrdering()
+                )
+            ),
+            contains(
+                isIndexShardAt(".ds-data-stream-2024.04.18-000002", "node-1"),
+                isIndexShardAt("lookup", "node-1"),
+                isIndexShardAt(".ds-data-stream-2024.04.18-000001", "node-1")
+            )
+        );
+
+        // when performing rebalancing write shards should be moved last
+        assertThat(
+            next(
+                3,
+                OrderedShardsIterator.createForBalancing(createRoutingAllocation(nodes, metadata, routing), new NodeAllocationOrdering())
+            ),
+            contains(
+                isIndexShardAt(".ds-data-stream-2024.04.18-000001", "node-1"),
+                isIndexShardAt("lookup", "node-1"),
+                isIndexShardAt(".ds-data-stream-2024.04.18-000002", "node-1")
+            )
+        );
+    }
+
+    private static RoutingAllocation createRoutingAllocation(DiscoveryNodes nodes, Metadata metadata, RoutingTable routing) {
+        return new RoutingAllocation(
+            new AllocationDeciders(List.of()),
+            ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).metadata(metadata).routingTable(routing).build(),
+            ClusterInfo.EMPTY,
+            SnapshotShardSizeInfo.EMPTY,
+            0
+        );
     }

     private static IndexRoutingTable index(String indexName, String nodeId) {
-        var index = new Index(indexName, "_na_");
+        return index(new Index(indexName, "_na_"), nodeId);
+    }
+
+    private static IndexRoutingTable index(Index index, String nodeId) {
         return IndexRoutingTable.builder(index).addShard(newShardRouting(new ShardId(index, 0), nodeId, true, STARTED)).build();
     }

@@ -120,7 +199,9 @@ protected boolean matchesSafely(ShardRouting item) {
             }

             @Override
-            public void describeTo(Description description) {}
+            public void describeTo(Description description) {
+                description.appendText("[" + indexName + "][0], node[" + nodeId + "]");
+            }
         };
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java
index 25d69a956ff81..3577b6afd73df 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java
@@ -26,7 +26,6 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.function.Supplier;
@@ -38,7 +37,6 @@ public class InternalScriptedMetricTests extends InternalAggregationTestCase<InternalScriptedMetric> {

-    private Supplier<Object>[] valueTypes;
     @SuppressWarnings({ "rawtypes", "unchecked" })
     private final Supplier<Object>[] leafValueSuppliers = new Supplier[] {
         () -> randomInt(),
@@ -50,24 +48,13 @@
         () -> new GeoPoint(randomDouble(), randomDouble()),
         () -> null };

     @SuppressWarnings({ "rawtypes", "unchecked" })
-    private final Supplier<Object>[] nestedValueSuppliers = new Supplier[] { () -> new HashMap(), () -> new ArrayList<>() };
+    private final Supplier<Object>[] nestedValueSuppliers = new Supplier[] { HashMap::new, ArrayList::new };

     @Override
     @SuppressWarnings({ "rawtypes", "unchecked" })
     public void setUp() throws Exception {
         super.setUp();
         hasReduceScript = randomBoolean();
-        // we want the same value types (also for nested lists, maps) for all random aggregations
-        int levels = randomIntBetween(1, 3);
-        valueTypes = new Supplier[levels];
-        for (int i = 0; i < levels; i++) {
-            if (i < levels - 1) {
-                valueTypes[i] = randomFrom(nestedValueSuppliers);
-            } else {
-                // the last one needs to be a leaf value, not map or list
-                valueTypes[i] = randomFrom(leafValueSuppliers);
-            }
-        }
     }

     @Override
@@ -178,51 +165,6 @@ public InternalScriptedMetric createTestInstanceForXContent() {
         );
     }

-    private static void assertValues(Object expected, Object actual) {
-        if (expected instanceof Long) {
-            // longs that fit into the integer range are parsed back as integer
-            if (actual instanceof Integer) {
-                assertEquals(((Long) expected).intValue(), actual);
-            } else {
-                assertEquals(expected, actual);
-            }
-        } else if (expected instanceof Float) {
-            // based on the xContent type, floats are sometimes parsed back as doubles
-            if (actual instanceof Double) {
-                assertEquals(expected, ((Double) actual).floatValue());
-            } else {
-                assertEquals(expected, actual);
-            }
-        } else if (expected instanceof GeoPoint point) {
-            assertTrue(actual instanceof Map);
-            @SuppressWarnings("unchecked")
-            Map<String, Object> pointMap = (Map<String, Object>) actual;
-            assertEquals(point.getLat(), pointMap.get("lat"));
-            assertEquals(point.getLon(), pointMap.get("lon"));
-        } else if (expected instanceof Map) {
-            @SuppressWarnings("unchecked")
-            Map<String, Object> expectedMap = (Map<String, Object>) expected;
-            @SuppressWarnings("unchecked")
-            Map<String, Object> actualMap = (Map<String, Object>) actual;
-            assertEquals(expectedMap.size(), actualMap.size());
-            for (String key : expectedMap.keySet()) {
-                assertValues(expectedMap.get(key), actualMap.get(key));
-            }
-        } else if (expected instanceof List) {
-            @SuppressWarnings("unchecked")
-            List<Object> expectedList = (List<Object>) expected;
-            @SuppressWarnings("unchecked")
-            List<Object> actualList = (List<Object>) actual;
-            assertEquals(expectedList.size(), actualList.size());
-            Iterator<Object> actualIterator = actualList.iterator();
-            for (Object element : expectedList) {
-                assertValues(element, actualIterator.next());
-            }
-        } else {
-            assertEquals(expected, actual);
-        }
-    }
-
     @Override
     protected InternalScriptedMetric mutateInstance(InternalScriptedMetric instance) {
         String name = instance.getName();
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/PreloadedFieldLookupProviderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/PreloadedFieldLookupProviderTests.java
index 85d9c32a1ee5b..13cdb01156f05 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/PreloadedFieldLookupProviderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/PreloadedFieldLookupProviderTests.java
@@ -13,11 +13,13 @@
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.memory.MemoryIndex;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.search.lookup.FieldLookup;
 import org.elasticsearch.test.ESTestCase;

 import java.io.IOException;
+import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -30,7 +32,16 @@ public class PreloadedFieldLookupProviderTests extends ESTestCase {

     public void testFallback() throws IOException {
         PreloadedFieldLookupProvider lookup = new PreloadedFieldLookupProvider();
-        lookup.storedFields = Map.of("foo", List.of("bar"));
+        lookup.setPreloadedStoredFieldNames(Collections.singleton("foo"));
+        lookup.setPreloadedStoredFieldValues("id", Map.of("foo", List.of("bar")));
+
+        MappedFieldType idFieldType = mock(MappedFieldType.class);
+        when(idFieldType.name()).thenReturn(IdFieldMapper.NAME);
+        when(idFieldType.valueForDisplay(any())).then(invocation -> (invocation.getArguments()[0]));
+        FieldLookup idFieldLookup = new FieldLookup(idFieldType);
+        lookup.populateFieldLookup(idFieldLookup, 0);
+        assertEquals("id", idFieldLookup.getValue());
+        assertNull(lookup.getBackUpLoader()); // fallback didn't get used because 'foo' is in the list

         MappedFieldType fieldType = mock(MappedFieldType.class);
         when(fieldType.name()).thenReturn("foo");
@@ -39,7 +50,7 @@ public void testFallback() throws IOException {
         lookup.populateFieldLookup(fieldLookup, 0);
         assertEquals("BAR", fieldLookup.getValue());

-        assertNull(lookup.backUpLoader); // fallback didn't get used because 'foo' is in the list
+        assertNull(lookup.getBackUpLoader()); // fallback didn't get used because 'foo' is in the list

         MappedFieldType unloadedFieldType = mock(MappedFieldType.class);
         when(unloadedFieldType.name()).thenReturn("unloaded");
@@ -56,5 +67,4 @@ public void testFallback() throws IOException {
         lookup.populateFieldLookup(unloadedFieldLookup, 0);
         assertEquals("VALUE", unloadedFieldLookup.getValue());
     }
-
 }
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java
index 39e73837c83ea..3a7460c05ca87 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java
@@ -19,6 +19,7 @@
 import org.elasticsearch.index.mapper.DocValueFetcher;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NestedLookup;
+import org.elasticsearch.index.mapper.StoredValueFetcher;
 import org.elasticsearch.index.query.SearchExecutionContext;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.SearchHit;
@@ -26,6 +27,9 @@
 import org.elasticsearch.search.fetch.FetchContext;
 import org.elasticsearch.search.fetch.FetchSubPhase;
 import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
+import org.elasticsearch.search.fetch.StoredFieldsContext;
+import org.elasticsearch.search.fetch.StoredFieldsSpec;
+import org.elasticsearch.search.lookup.SearchLookup;
 import org.elasticsearch.search.lookup.Source;
 import org.elasticsearch.test.ESTestCase;

@@ -35,6 +39,7 @@
 import java.util.Set;

 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;

@@ -90,6 +95,58 @@ public void testDocValueFetcher() throws IOException {

         reader.close();
         dir.close();
+    }
+
+    public void testStoredFieldsSpec() {
+        StoredFieldsContext storedFieldsContext = StoredFieldsContext.fromList(List.of("stored", "_metadata"));
+        FetchFieldsContext ffc = new FetchFieldsContext(List.of(new FieldAndFormat("field", null)));
+
+        SearchLookup searchLookup = mock(SearchLookup.class);
+
+        SearchExecutionContext sec = mock(SearchExecutionContext.class);
+        when(sec.isMetadataField(any())).then(invocation -> invocation.getArguments()[0].toString().startsWith("_"));
+
+        MappedFieldType routingFt = mock(MappedFieldType.class);
+        when(routingFt.valueFetcher(any(), any())).thenReturn(new StoredValueFetcher(searchLookup, "_routing"));
+        when(sec.getFieldType(eq("_routing"))).thenReturn(routingFt);
+
+        // this would normally not be mapped -> getMatchingFieldsNames would not resolve it (unless for older archive indices)
+        MappedFieldType typeFt = mock(MappedFieldType.class);
+        when(typeFt.valueFetcher(any(), any())).thenReturn(new StoredValueFetcher(searchLookup, "_type"));
+        when(sec.getFieldType(eq("_type"))).thenReturn(typeFt);
+
+        MappedFieldType ignoredFt = mock(MappedFieldType.class);
+        when(ignoredFt.valueFetcher(any(), any())).thenReturn(new StoredValueFetcher(searchLookup, "_ignored"));
+        when(sec.getFieldType(eq("_ignored"))).thenReturn(ignoredFt);
+
+        // Ideally we would test that explicitly requested stored fields are included in stored fields spec, but isStored is final hence it
+        // can't be mocked. In reality, _metadata would be included but stored would not.
+        MappedFieldType storedFt = mock(MappedFieldType.class);
+        when(sec.getFieldType(eq("stored"))).thenReturn(storedFt);
+        MappedFieldType metadataFt = mock(MappedFieldType.class);
+        when(sec.getFieldType(eq("_metadata"))).thenReturn(metadataFt);
+
+        MappedFieldType fieldType = mock(MappedFieldType.class);
+        when(fieldType.valueFetcher(any(), any())).thenReturn(
+            new DocValueFetcher(
+                DocValueFormat.RAW,
+                new SortedNumericIndexFieldData("field", IndexNumericFieldData.NumericType.LONG, CoreValuesSourceType.NUMERIC, null, false)
+            )
+        );
+        when(sec.getFieldType(eq("field"))).thenReturn(fieldType);
+        when(sec.getMatchingFieldNames(any())).then(invocation -> Set.of(invocation.getArguments()[0]));
+        when(sec.nestedLookup()).thenReturn(NestedLookup.EMPTY);
+
+        FetchContext fetchContext = mock(FetchContext.class);
+        when(fetchContext.fetchFieldsContext()).thenReturn(ffc);
+        when(fetchContext.storedFieldsContext()).thenReturn(storedFieldsContext);
+        when(fetchContext.getSearchExecutionContext()).thenReturn(sec);
+        FetchFieldsPhase fetchFieldsPhase = new FetchFieldsPhase();
+        FetchSubPhaseProcessor processor = fetchFieldsPhase.getProcessor(fetchContext);
+        StoredFieldsSpec storedFieldsSpec = processor.storedFieldsSpec();
+        assertEquals(3, storedFieldsSpec.requiredStoredFields().size());
+        assertTrue(storedFieldsSpec.requiredStoredFields().contains("_routing"));
+        assertTrue(storedFieldsSpec.requiredStoredFields().contains("_ignored"));
+        assertTrue(storedFieldsSpec.requiredStoredFields().contains("_type"));
     }
 }
diff --git a/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java b/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java
index f628566587611..05eddd742960c 100644
--- a/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java
+++ b/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java
@@ -177,7 +177,6 @@ public void testWithUsers() throws Exception {
      * the testWithUsers test is generally testing).
      * @throws IOException
      */
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106871")
     public void testStatusWithUsersWhileSearchIsRunning() throws IOException {
         assumeTrue("[error_query] is only available in snapshot builds", Build.current().isSnapshot());
         String user = randomFrom("user1", "user2");
@@ -250,6 +249,9 @@ private static void userBasedPermissionsAsserts(String user, String other, Strin
         // user-monitor can access the status
         assertOK(getAsyncStatus(id, "user-monitor"));

+        // user-monitor can access status and set keep_alive
+        assertOK(getAsyncStatusAndSetKeepAlive(id, "user-monitor"));
+
         // user-monitor cannot access the result
         exc = expectThrows(ResponseException.class, () -> getAsyncSearch(id, "user-monitor"));
         assertThat(exc.getResponse().getStatusLine().getStatusCode(), equalTo(404));
@@ -485,6 +487,13 @@ static Response getAsyncStatus(String id, String user) throws IOException {
         return client().performRequest(request);
     }

+    static Response getAsyncStatusAndSetKeepAlive(String id, String user) throws IOException {
+        final Request request = new Request("GET", "/_async_search/status/" + id);
+        setRunAsHeader(request, user);
+        request.addParameter("keep_alive", "3m");
+        return client().performRequest(request);
+    }
+
     static Response getAsyncSearch(String id, String user) throws IOException {
         final Request request = new Request("GET", "/_async_search/" + id);
         setRunAsHeader(request, user);
diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncStatusAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncStatusAction.java
index cc5cd797f3fbc..cc27e82a69388 100644
--- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncStatusAction.java
+++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportGetAsyncStatusAction.java
@@ -35,6 +35,7 @@

 import static org.elasticsearch.core.Strings.format;
 import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN;
+import static org.elasticsearch.xpack.core.async.AsyncTaskIndexService.getTask;

 public class TransportGetAsyncStatusAction extends HandledTransportAction<GetAsyncStatusRequest, AsyncStatusResponse> {
     private final TransportService transportService;
@@ -76,7 +77,7 @@ protected void doExecute(Task task, GetAsyncStatusRequest request, ActionListene
         if (request.getKeepAlive() != null && request.getKeepAlive().getMillis() > 0) {
             long expirationTime = System.currentTimeMillis() + request.getKeepAlive().getMillis();
             store.updateExpirationTime(searchId.getDocId(), expirationTime, ActionListener.wrap(p -> {
-                AsyncSearchTask asyncSearchTask = store.getTaskAndCheckAuthentication(taskManager, searchId, AsyncSearchTask.class);
+                AsyncSearchTask asyncSearchTask = getTask(taskManager, searchId, AsyncSearchTask.class);
                 if (asyncSearchTask != null) {
                     asyncSearchTask.setExpirationTime(expirationTime);
                 }
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/search_default_pipeline.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/search_default_pipeline.json
new file mode 100644
index 0000000000000..bd556900a42e1
--- /dev/null
+++ b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/search_default_pipeline.json
@@ -0,0 +1,130 @@
+{
+  "version": ${xpack.application.connector.template.version},
+  "description": "Default search ingest pipeline",
+  "_meta": {
+    "managed_by": "Search",
+    "managed": true
+  },
+  "processors": [
+    {
+      "attachment": {
+        "description": "Extract text from binary attachments",
+        "field": "_attachment",
+        "target_field": "_extracted_attachment",
+        "ignore_missing": true,
+        "indexed_chars_field": "_attachment_indexed_chars",
+        "if": "ctx?._extract_binary_content == true",
+        "on_failure": [
+          {
+            "append": {
+              "description": "Record error information",
+              "field": "_ingestion_errors",
+              "value": "Processor 'attachment' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'"
+            }
+          }
+        ],
+        "remove_binary": false
+      }
+    },
+    {
+      "set": {
+        "tag": "set_body",
+        "description": "Set any extracted text on the 'body' field",
+        "field": "body",
+        "copy_from": "_extracted_attachment.content",
+        "ignore_empty_value": true,
+        "if": "ctx?._extract_binary_content == true",
+        "on_failure": [
+          {
+            "append": {
+              "description": "Record error information",
+              "field": "_ingestion_errors",
+              "value": "Processor 'set' with tag 'set_body' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'"
+            }
+          }
+        ]
+      }
+    },
+    {
+      "gsub": {
+        "tag": "remove_replacement_chars",
+        "description": "Remove unicode 'replacement' characters",
+        "field": "body",
+        "pattern": "�",
+        "replacement": "",
+        "ignore_missing": true,
+        "if": "ctx?._extract_binary_content == true",
+        "on_failure": [
+          {
+            "append": {
+              "description": "Record error information",
+              "field": "_ingestion_errors",
+              "value": "Processor 'gsub' with tag 'remove_replacement_chars' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'"
+            }
+          }
+        ]
+      }
+    },
+    {
+      "gsub": {
+        "tag": "remove_extra_whitespace",
+        "description": "Squish whitespace",
+        "field": "body",
+        "pattern": "\\s+",
+        "replacement": " ",
+        "ignore_missing": true,
+        "if": "ctx?._reduce_whitespace == true",
+        "on_failure": [
+          {
+            "append": {
+              "description": "Record error information",
+              "field": "_ingestion_errors",
+              "value": "Processor 'gsub' with tag 'remove_extra_whitespace' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'"
+            }
+          }
+        ]
+      }
+    },
+    {
+      "trim": {
+        "description": "Trim leading and trailing whitespace",
+        "field": "body",
+        "ignore_missing": true,
+        "if": "ctx?._reduce_whitespace == true",
+        "on_failure": [
+          {
+            "append": {
+              "description": "Record error information",
+              "field": "_ingestion_errors",
+              "value": "Processor 'trim' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'"
+            }
+          }
+        ]
+      }
+    },
+    {
+      "remove": {
+        "tag": "remove_meta_fields",
+        "description": "Remove meta fields",
+        "field": [
+          "_attachment",
+          "_attachment_indexed_chars",
+          "_extracted_attachment",
+          "_extract_binary_content",
+          "_reduce_whitespace",
+          "_run_ml_inference"
+        ],
+        "ignore_missing": true,
+        "on_failure": [
+          {
+            "append": {
+              "description": "Record error information",
+              "field": "_ingestion_errors",
+              "value": "Processor 'remove' with tag 'remove_meta_fields' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'"
+            }
+          }
+        ]
+      }
+    }
+  ]
+}
diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java
index 57024acee809f..d8dd030903e0e 100644
--- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java
+++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java
@@ -9,6 +9,7 @@

 import org.elasticsearch.action.admin.indices.rollover.RolloverAction;
 import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
+import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction;
 import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling;
@@ -16,7 +17,6 @@
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.datastreams.DataStreamsPlugin;
 import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService;
-import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.elasticsearch.test.ESIntegTestCase;
diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java
index 749bce59a4bbb..722328b6b76d6 100644
--- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java
+++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java
@@ -7,6 +7,7 @@

 package org.elasticsearch.xpack.enrich;

+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.metadata.IndexAbstraction;
@@ -24,6 +25,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.function.BiConsumer;

 /**
  * A simple cache for enrich that uses {@link Cache}. There is one instance of this cache and
@@ -52,6 +54,32 @@ public final class EnrichCache {
         this.cache = CacheBuilder.<CacheKey, List<Map<?, ?>>>builder().setMaximumWeight(maxSize).build();
     }

+    /**
+     * This method notifies the given listener of the value in this cache for the given searchRequest. If there is no value in the cache
+     * for the searchRequest, then the new cache value is computed using searchResponseFetcher.
+     * @param searchRequest The key for the cache request
+     * @param searchResponseFetcher The function used to compute the value to be put in the cache, if there is no value in the cache already
+     * @param listener A listener to be notified of the value in the cache
+     */
+    public void computeIfAbsent(
+        SearchRequest searchRequest,
+        BiConsumer<SearchRequest, ActionListener<SearchResponse>> searchResponseFetcher,
+        ActionListener<List<Map<?, ?>>> listener
+    ) {
+        // intentionally non-locking for simplicity...it's OK if we re-put the same key/value in the cache during a race condition.
+        List<Map<?, ?>> response = get(searchRequest);
+        if (response != null) {
+            listener.onResponse(response);
+        } else {
+            searchResponseFetcher.accept(searchRequest, ActionListener.wrap(resp -> {
+                List<Map<?, ?>> value = toCacheValue(resp);
+                put(searchRequest, value);
+                listener.onResponse(deepCopy(value, false));
+            }, listener::onFailure));
+        }
+    }
+
+    // non-private for unit testing only
     List<Map<?, ?>> get(SearchRequest searchRequest) {
         String enrichIndex = getEnrichIndexKey(searchRequest);
         CacheKey cacheKey = new CacheKey(enrichIndex, searchRequest);
@@ -64,6 +92,7 @@ public final class EnrichCache {
         }
     }

+    // non-private for unit testing only
     void put(SearchRequest searchRequest, List<Map<?, ?>> response) {
         String enrichIndex = getEnrichIndexKey(searchRequest);
         CacheKey cacheKey = new CacheKey(enrichIndex, searchRequest);
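Note: computeIfAbsent deliberately tolerates the cache-stampede race (two concurrent misses may both run the fetcher and re-put the same value) in exchange for lock-free reads, as its comment states. A hedged caller-side sketch; fetchFromEnrichIndex and the two handlers are hypothetical stand-ins for the wiring shown in the EnrichProcessorFactory diff below:

// Illustrative only: on a miss the fetcher runs and the value is cached;
// on a hit the fetcher is never invoked and the cached value is delivered.
enrichCache.computeIfAbsent(
    searchRequest,
    (request, responseListener) -> fetchFromEnrichIndex(request, responseListener),
    ActionListener.wrap(hits -> handleHits(hits), e -> handleFailure(e))
);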
+        List<Map<?, ?>> response = get(searchRequest);
+        if (response != null) {
+            listener.onResponse(response);
+        } else {
+            searchResponseFetcher.accept(searchRequest, ActionListener.wrap(resp -> {
+                List<Map<?, ?>> value = toCacheValue(resp);
+                put(searchRequest, value);
+                listener.onResponse(deepCopy(value, false));
+            }, listener::onFailure));
+        }
+    }
+
+    // non-private for unit testing only
     List<Map<?, ?>> get(SearchRequest searchRequest) {
         String enrichIndex = getEnrichIndexKey(searchRequest);
         CacheKey cacheKey = new CacheKey(enrichIndex, searchRequest);
@@ -64,6 +92,7 @@ public final class EnrichCache {
         }
     }
 
+    // non-private for unit testing only
     void put(SearchRequest searchRequest, List<Map<?, ?>> response) {
         String enrichIndex = getEnrichIndexKey(searchRequest);
         CacheKey cacheKey = new CacheKey(enrichIndex, searchRequest);
diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactory.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactory.java
index 907ebb0c9ce3a..9890a96aae820 100644
--- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactory.java
+++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactory.java
@@ -131,16 +131,15 @@ public void accept(ClusterState state) {
         Client originClient = new OriginSettingClient(client, ENRICH_ORIGIN);
         return (req, handler) -> {
             // intentionally non-locking for simplicity...it's OK if we re-put the same key/value in the cache during a race condition.
-            List<Map<?, ?>> response = enrichCache.get(req);
-            if (response != null) {
-                handler.accept(response, null);
-            } else {
-                originClient.execute(EnrichCoordinatorProxyAction.INSTANCE, req, ActionListener.wrap(resp -> {
-                    List<Map<?, ?>> value = EnrichCache.toCacheValue(resp);
-                    enrichCache.put(req, value);
-                    handler.accept(EnrichCache.deepCopy(value, false), null);
-                }, e -> { handler.accept(null, e); }));
-            }
+            enrichCache.computeIfAbsent(
+                req,
+                (searchRequest, searchResponseActionListener) -> originClient.execute(
+                    EnrichCoordinatorProxyAction.INSTANCE,
+                    searchRequest,
+                    searchResponseActionListener
+                ),
+                ActionListener.wrap(resp -> handler.accept(resp, null), e -> handler.accept(null, e))
+            );
         };
     }
 }
diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichCacheTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichCacheTests.java
index 735d68f61416e..fe3c3b3e467ef 100644
--- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichCacheTests.java
+++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichCacheTests.java
@@ -6,21 +6,31 @@
  */
 package org.elasticsearch.xpack.enrich;
 
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.metadata.AliasMetadata;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.core.enrich.EnrichPolicy;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -138,6 +148,114 @@ public void testCaching() {
         assertThat(cacheStats.getEvictions(), equalTo(4L));
     }
 
+    public void testPutIfAbsent() throws InterruptedException {
+        // Emulate cluster metadata:
+        // (two enrich indices with corresponding alias entries)
+        var metadata = Metadata.builder()
+            .put(
+                IndexMetadata.builder(EnrichPolicy.getBaseName("policy1") + "-1")
+                    .settings(settings(IndexVersion.current()))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0)
+                    .putAlias(AliasMetadata.builder(EnrichPolicy.getBaseName("policy1")).build())
+            )
+            .put(
+                IndexMetadata.builder(EnrichPolicy.getBaseName("policy2") + "-1")
+                    .settings(settings(IndexVersion.current()))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0)
+                    .putAlias(AliasMetadata.builder(EnrichPolicy.getBaseName("policy2")).build())
+            )
+            .build();
+
+        // Emulated search requests that an enrich processor could generate:
+        // (two unique searches for two enrich policies)
+        var searchRequest1 = new SearchRequest(EnrichPolicy.getBaseName("policy1")).source(
+            new SearchSourceBuilder().query(new MatchQueryBuilder("match_field", "1"))
+        );
+        final List<Map<String, String>> searchResponseMap = List.of(
+            Map.of("key1", "value1", "key2", "value2"),
+            Map.of("key3", "value3", "key4", "value4")
+        );
+        EnrichCache enrichCache = new EnrichCache(3);
+        enrichCache.setMetadata(metadata);
+
+        {
+            CountDownLatch queriedDatabaseLatch = new CountDownLatch(1);
+            CountDownLatch notifiedOfResultLatch = new CountDownLatch(1);
+            enrichCache.computeIfAbsent(searchRequest1, (searchRequest, searchResponseActionListener) -> {
+                SearchResponse searchResponse = convertToSearchResponse(searchResponseMap);
+                searchResponseActionListener.onResponse(searchResponse);
+                searchResponse.decRef();
+                queriedDatabaseLatch.countDown();
+            }, new ActionListener<>() {
+                @Override
+                public void onResponse(List<Map<?, ?>> response) {
+                    assertThat(response, equalTo(searchResponseMap));
+                    notifiedOfResultLatch.countDown();
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    fail(e);
+                }
+            });
+            assertThat(queriedDatabaseLatch.await(5, TimeUnit.SECONDS), equalTo(true));
+            assertThat(notifiedOfResultLatch.await(5, TimeUnit.SECONDS), equalTo(true));
+        }
+
+        {
+            CountDownLatch notifiedOfResultLatch = new CountDownLatch(1);
+            enrichCache.computeIfAbsent(searchRequest1, (searchRequest, searchResponseActionListener) -> {
+                fail("Expected no call to the database because item should have been in the cache");
+            }, new ActionListener<>() {
+                @Override
+                public void onResponse(List<Map<?, ?>> maps) {
+                    notifiedOfResultLatch.countDown();
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    fail(e);
+                }
+            });
+            assertThat(notifiedOfResultLatch.await(5, TimeUnit.SECONDS), equalTo(true));
+        }
+    }
+
+    private SearchResponse convertToSearchResponse(List<Map<String, String>> searchResponseList) {
+        SearchHit[] hitArray = searchResponseList.stream().map(map -> {
+            try {
+                return SearchHit.unpooled(0, "id").sourceRef(convertMapToJson(map));
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        }).toArray(SearchHit[]::new);
+        SearchHits hits = SearchHits.unpooled(hitArray, null, 0);
+        return new SearchResponse(
+            hits,
+            null,
+            null,
+            false,
+            false,
+            null,
+            1,
+            null,
+            5,
+            4,
+            0,
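+            // 5/4/0 above are the total/successful/skipped shard counts; next is the took-time in millis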
+            randomLong(),
+            null,
+            SearchResponse.Clusters.EMPTY
+        );
+    }
+
+    private BytesReference convertMapToJson(Map<String, String> simpleMap) throws IOException {
+        try (XContentBuilder builder = JsonXContent.contentBuilder().map(simpleMap)) {
+            return BytesReference.bytes(builder);
+        }
+    }
+
     public void testDeepCopy() {
         Map<String, Object> original = new HashMap<>();
         {
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java
index c57650541b416..e4ce4d8181fd8 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java
@@ -54,6 +54,9 @@ public class ConnectorTemplateRegistry extends IndexTemplateRegistry {
     public static final String ENT_SEARCH_GENERIC_PIPELINE_NAME = "ent-search-generic-ingestion";
     public static final String ENT_SEARCH_GENERIC_PIPELINE_FILE = "generic_ingestion_pipeline";
 
+    public static final String SEARCH_DEFAULT_PIPELINE_NAME = "search-default-ingestion";
+    public static final String SEARCH_DEFAULT_PIPELINE_FILE = "search_default_pipeline";
+
     // Resource config
     public static final String ROOT_RESOURCE_PATH = "/entsearch/";
     public static final String ROOT_TEMPLATE_RESOURCE_PATH = ROOT_RESOURCE_PATH + "connector/";
@@ -115,6 +118,12 @@ protected List<IngestPipelineConfig> getIngestPipelines() {
                 ROOT_RESOURCE_PATH + ENT_SEARCH_GENERIC_PIPELINE_FILE + ".json",
                 REGISTRY_VERSION,
                 TEMPLATE_VERSION_VARIABLE
+            ),
+            new JsonIngestPipelineConfig(
+                SEARCH_DEFAULT_PIPELINE_NAME,
+                ROOT_RESOURCE_PATH + SEARCH_DEFAULT_PIPELINE_FILE + ".json",
+                REGISTRY_VERSION,
+                TEMPLATE_VERSION_VARIABLE
             )
         );
     }
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java
index b9c58f728d1e3..7278b0e6c7f49 100644
--- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java
+++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java
@@ -16,7 +16,9 @@
 import org.elasticsearch.action.datastreams.CreateDataStreamAction;
 import org.elasticsearch.action.datastreams.GetDataStreamAction;
 import org.elasticsearch.action.datastreams.GetDataStreamAction.Response.ManagedBy;
+import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction;
 import org.elasticsearch.action.datastreams.lifecycle.ExplainIndexDataStreamLifecycle;
+import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.cluster.metadata.DataStream;
@@ -28,8 +30,6 @@
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.datastreams.DataStreamsPlugin;
 import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService;
-import org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction;
-import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.DateFieldMapper;
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/AdjustableCapacityBlockingQueue.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/AdjustableCapacityBlockingQueue.java
index e73151b44a3e4..ea600488ea8f6 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/AdjustableCapacityBlockingQueue.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/AdjustableCapacityBlockingQueue.java
@@ -145,6 +145,40 @@ public E take() throws InterruptedException {
         }
     }
 
+    public E peek() {
+        final ReentrantReadWriteLock.ReadLock readLock = lock.readLock();
+
+        readLock.lock();
+        try {
+            var oldItem = prioritizedReadingQueue.peek();
+
+            if (oldItem != null) {
+                return oldItem;
+            }
+
+            return currentQueue.peek();
+        } finally {
+            readLock.unlock();
+        }
+    }
+
+    public E poll() {
+        final ReentrantReadWriteLock.ReadLock readLock = lock.readLock();
+
+        readLock.lock();
+        try {
+            var oldItem = prioritizedReadingQueue.poll();
+
+            if (oldItem != null) {
+                return oldItem;
+            }
+
+            return currentQueue.poll();
+        } finally {
+            readLock.unlock();
+        }
+    }
+
     /**
      * Returns the number of elements stored in the queue. If the capacity was recently changed, the value returned could be
      * greater than the capacity. This occurs when the capacity was reduced and there were more elements in the queue than the
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/RateLimiter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/RateLimiter.java
index ac28aa87f554b..bbc5082d45004 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/RateLimiter.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/RateLimiter.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.inference.common;
 
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.core.TimeValue;
 
 import java.time.Clock;
 import java.time.Instant;
@@ -92,24 +93,59 @@ public final synchronized void setRate(double newAccumulatedTokensLimit, double
      * @throws InterruptedException _
      */
     public void acquire(int tokens) throws InterruptedException {
+        sleeper.sleep(reserveInternal(tokens));
+    }
+
+    /**
+     * Returns the amount of time to wait for the tokens to become available but does not reserve them in advance.
+     * A caller will need to call {@link #reserve(int)} or {@link #acquire(int)} after this call.
+     * @param tokens the number of items of work that should be throttled, typically you'd pass a value of 1 here. Must be greater than 0.
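+     *               Note that the returned wait time is advisory only: because nothing is reserved, a concurrent caller may consume the tokens first.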
+     * @return the amount of time to wait
+     */
+    public TimeValue timeToReserve(int tokens) {
+        var timeToReserveRes = timeToReserveInternal(tokens);
+
+        return new TimeValue((long) timeToReserveRes.microsToWait, TimeUnit.MICROSECONDS);
+    }
+
+    private TimeToReserve timeToReserveInternal(int tokens) {
+        validateTokenRequest(tokens);
+
+        double microsToWait;
+        accumulateTokens();
+        var accumulatedTokensToUse = Math.min(tokens, accumulatedTokens);
+        var additionalTokensRequired = tokens - accumulatedTokensToUse;
+        microsToWait = additionalTokensRequired / tokensPerMicros;
+
+        return new TimeToReserve(microsToWait, accumulatedTokensToUse);
+    }
+
+    private record TimeToReserve(double microsToWait, double accumulatedTokensToUse) {}
+
+    private static void validateTokenRequest(int tokens) {
         if (tokens <= 0) {
             throw new IllegalArgumentException("Requested tokens must be positive");
         }
+    }
 
-        double microsToWait;
-        synchronized (this) {
-            accumulateTokens();
-            var accumulatedTokensToUse = Math.min(tokens, accumulatedTokens);
-            var additionalTokensRequired = tokens - accumulatedTokensToUse;
-            microsToWait = additionalTokensRequired / tokensPerMicros;
-            accumulatedTokens -= accumulatedTokensToUse;
-            nextTokenAvailability = nextTokenAvailability.plus((long) microsToWait, ChronoUnit.MICROS);
-        }
+    /**
+     * Reserves the requested number of tokens and returns the amount of time the caller should wait before using them.
+     * @param tokens the number of items of work that should be throttled, typically you'd pass a value of 1 here. Must be greater than 0.
+     * @return the amount of time to wait
+     */
+    public TimeValue reserve(int tokens) {
+        return new TimeValue(reserveInternal(tokens), TimeUnit.MICROSECONDS);
+    }
+
+    private synchronized long reserveInternal(int tokens) {
+        var timeToReserveRes = timeToReserveInternal(tokens);
+        accumulatedTokens -= timeToReserveRes.accumulatedTokensToUse;
+        nextTokenAvailability = nextTokenAvailability.plus((long) timeToReserveRes.microsToWait, ChronoUnit.MICROS);
 
-        sleeper.sleep((long) microsToWait);
+        return (long) timeToReserveRes.microsToWait;
     }
 
-    private void accumulateTokens() {
+    private synchronized void accumulateTokens() {
         var now = Instant.now(clock);
         if (now.isAfter(nextTokenAvailability)) {
             var elapsedTimeMicros = microsBetweenExact(nextTokenAvailability, now);
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/AdjustableCapacityBlockingQueueTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/AdjustableCapacityBlockingQueueTests.java
index 09cd065ce3cd0..5a70b98313f7c 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/AdjustableCapacityBlockingQueueTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/AdjustableCapacityBlockingQueueTests.java
@@ -233,6 +233,65 @@ public void testTake_RemovesItemFromQueue() throws InterruptedException {
         assertThat(queue.size(), is(0));
     }
 
+    public void testPeek_ReturnsItemWithoutRemoving() {
+        var queue = new AdjustableCapacityBlockingQueue<>(QUEUE_CREATOR, 1);
+        assertThat(queue.size(), is(0));
+
+        queue.offer(0);
+        assertThat(queue.size(), is(1));
+        assertThat(queue.peek(), is(0));
+        assertThat(queue.size(), is(1));
+        assertThat(queue.peek(), is(0));
+    }
+
+    public void testPeek_ExistingItem_RemainsAtFront_AfterCapacityChange() throws InterruptedException {
+        var queue = new AdjustableCapacityBlockingQueue<>(QUEUE_CREATOR, 1);
+        queue.offer(0);
+        assertThat(queue.size(), is(1));
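+        // with capacity 1 and one element queued, the queue is full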
+        assertThat(queue.remainingCapacity(), is(0));
+        assertThat(queue.peek(), is(0));
+
+        queue.setCapacity(2);
+        assertThat(queue.remainingCapacity(), is(1));
+        assertThat(queue.peek(), is(0));
+
+        queue.offer(1);
+        assertThat(queue.peek(), is(0));
+        assertThat(queue.take(), is(0));
+        assertThat(queue.peek(), is(1));
+    }
+
+    public void testPoll_ReturnsNull_WhenNoItemsAreAvailable() {
+        var queue = new AdjustableCapacityBlockingQueue<>(QUEUE_CREATOR, 1);
+        assertNull(queue.poll());
+    }
+
+    public void testPoll_ReturnsFirstElement() {
+        var queue = new AdjustableCapacityBlockingQueue<>(QUEUE_CREATOR, 1);
+        queue.offer(0);
+        assertThat(queue.poll(), is(0));
+        assertThat(queue.size(), is(0));
+        assertThat(queue.remainingCapacity(), is(1));
+    }
+
+    public void testPoll_ReturnsFirstElement_AfterCapacityIncrease() {
+        var queue = new AdjustableCapacityBlockingQueue<>(QUEUE_CREATOR, 1);
+        queue.offer(0);
+        queue.setCapacity(2);
+        queue.offer(1);
+
+        assertThat(queue.remainingCapacity(), is(0));
+        assertThat(queue.size(), is(2));
+
+        assertThat(queue.poll(), is(0));
+        assertThat(queue.size(), is(1));
+        assertThat(queue.remainingCapacity(), is(1));
+
+        assertThat(queue.poll(), is(1));
+        assertThat(queue.size(), is(0));
+        assertThat(queue.remainingCapacity(), is(2));
+    }
+
     public static AdjustableCapacityBlockingQueue.QueueCreator<Integer> mockQueueCreator(BlockingQueue<Integer> backingQueue) {
         return new AdjustableCapacityBlockingQueue.QueueCreator<>() {
             @Override
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/BaseRateLimiterTests.java
similarity index 67%
rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterTests.java
rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/BaseRateLimiterTests.java
index 46931f12aaf4f..d012f135839c6 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/BaseRateLimiterTests.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.inference.common;
 
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ESTestCase;
 
 import java.time.Clock;
@@ -17,11 +18,19 @@
 import static org.hamcrest.Matchers.is;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
-public class RateLimiterTests extends ESTestCase {
+public abstract class BaseRateLimiterTests extends ESTestCase {
+
+    protected abstract TimeValue tokenMethod(RateLimiter limiter, int tokens) throws InterruptedException;
+
+    protected abstract void sleepValidationMethod(
+        TimeValue result,
+        RateLimiter.Sleeper mockSleeper,
+        int numberOfCallsToExpect,
+        long expectedMicrosecondsToSleep
+    ) throws InterruptedException;
+
     public void testThrows_WhenAccumulatedTokensLimit_IsNegative() {
         var exception = expectThrows(
             IllegalArgumentException.class,
@@ -65,19 +74,19 @@ public void testThrows_WhenTokensPerTimeUnit_IsNegative() {
         assertThat(exception.getMessage(), is("Tokens per time unit must be greater than 0"));
     }
 
-    public void testAcquire_Throws_WhenTokens_IsZero() {
+    public void testMethod_Throws_WhenTokens_IsZero() {
         var limiter = new RateLimiter(0, 1, TimeUnit.SECONDS, new RateLimiter.TimeUnitSleeper(), Clock.systemUTC());
 
         var exception = expectThrows(IllegalArgumentException.class, () -> limiter.acquire(0));
         assertThat(exception.getMessage(), is("Requested tokens must be positive"));
     }
 
-    public void testAcquire_Throws_WhenTokens_IsNegative() {
+    public void testMethod_Throws_WhenTokens_IsNegative() {
         var limiter = new RateLimiter(0, 1, TimeUnit.SECONDS, new RateLimiter.TimeUnitSleeper(), Clock.systemUTC());
 
         var exception = expectThrows(IllegalArgumentException.class, () -> limiter.acquire(-1));
         assertThat(exception.getMessage(), is("Requested tokens must be positive"));
     }
 
-    public void testAcquire_First_CallDoesNotSleep() throws InterruptedException {
+    public void testMethod_First_CallDoesNotSleep() throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
         when(clock.instant()).thenReturn(now);
@@ -85,11 +94,11 @@ public void testAcquire_First_CallDoesNotSleep() throws InterruptedException {
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         var limiter = new RateLimiter(1, 1, TimeUnit.MINUTES, sleeper, clock);
-        limiter.acquire(1);
-        verify(sleeper, times(1)).sleep(0);
+        var res = tokenMethod(limiter, 1);
+        sleepValidationMethod(res, sleeper, 1, 0);
     }
 
-    public void testAcquire_DoesNotSleep_WhenTokenRateIsHigh() throws InterruptedException {
+    public void testMethod_DoesNotSleep_WhenTokenRateIsHigh() throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
         when(clock.instant()).thenReturn(now);
@@ -97,11 +106,11 @@ public void testAcquire_DoesNotSleep_WhenTokenRateIsHigh() throws InterruptedExc
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         var limiter = new RateLimiter(0, Double.MAX_VALUE, TimeUnit.MICROSECONDS, sleeper, clock);
-        limiter.acquire(1);
-        verify(sleeper, times(1)).sleep(0);
+        var res = tokenMethod(limiter, 1);
+        sleepValidationMethod(res, sleeper, 1, 0);
     }
 
-    public void testAcquire_AcceptsMaxIntValue_WhenTokenRateIsHigh() throws InterruptedException {
+    public void testMethod_AcceptsMaxIntValue_WhenTokenRateIsHigh() throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
         when(clock.instant()).thenReturn(now);
@@ -109,11 +118,11 @@ public void testAcquire_AcceptsMaxIntValue_WhenTokenRateIsHigh() throws Interrup
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         var limiter = new RateLimiter(0, Double.MAX_VALUE, TimeUnit.MICROSECONDS, sleeper, clock);
-        limiter.acquire(Integer.MAX_VALUE);
-        verify(sleeper, times(1)).sleep(0);
+        var res = tokenMethod(limiter, Integer.MAX_VALUE);
+        sleepValidationMethod(res, sleeper, 1, 0);
     }
 
-    public void testAcquire_AcceptsMaxIntValue_WhenTokenRateIsLow() throws InterruptedException {
+    public void testMethod_AcceptsMaxIntValue_WhenTokenRateIsLow() throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
         when(clock.instant()).thenReturn(now);
@@ -122,13 +131,13 @@ public void testAcquire_AcceptsMaxIntValue_WhenTokenRateIsLow() throws Interrupt
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         double tokensPerDay = 1;
         var limiter = new RateLimiter(0, tokensPerDay, TimeUnit.DAYS, sleeper, clock);
-        limiter.acquire(Integer.MAX_VALUE);
+        var res = tokenMethod(limiter, Integer.MAX_VALUE);
 
         double tokensPerMicro = tokensPerDay / TimeUnit.DAYS.toMicros(1);
-        verify(sleeper, times(1)).sleep((long) ((double) Integer.MAX_VALUE / tokensPerMicro));
+        sleepValidationMethod(res, sleeper, 1, (long) ((double) Integer.MAX_VALUE / tokensPerMicro));
     }
 
-    public void testAcquire_SleepsForOneMinute_WhenRequestingOneUnavailableToken() throws InterruptedException {
+    public void testMethod_SleepsForOneMinute_WhenRequestingOneUnavailableToken() throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
         when(clock.instant()).thenReturn(now);
@@ -136,11 +145,11 @@ public void testAcquire_SleepsForOneMinute_WhenRequestingOneUnavailableToken() t
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         var limiter = new RateLimiter(1, 1, TimeUnit.MINUTES, sleeper, clock);
-        limiter.acquire(2);
-        verify(sleeper, times(1)).sleep(TimeUnit.MINUTES.toMicros(1));
+        var res = tokenMethod(limiter, 2);
+        sleepValidationMethod(res, sleeper, 1, TimeUnit.MINUTES.toMicros(1));
     }
 
-    public void testAcquire_SleepsForOneMinute_WhenRequestingOneUnavailableToken_NoAccumulated() throws InterruptedException {
+    public void testMethod_SleepsForOneMinute_WhenRequestingOneUnavailableToken_NoAccumulated() throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
         when(clock.instant()).thenReturn(now);
@@ -148,11 +157,11 @@ public void testAcquire_SleepsForOneMinute_WhenRequestingOneUnavailableToken_NoA
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         var limiter = new RateLimiter(0, 1, TimeUnit.MINUTES, sleeper, clock);
-        limiter.acquire(1);
-        verify(sleeper, times(1)).sleep(TimeUnit.MINUTES.toMicros(1));
+        var res = tokenMethod(limiter, 1);
+        sleepValidationMethod(res, sleeper, 1, TimeUnit.MINUTES.toMicros(1));
     }
 
-    public void testAcquire_SleepsFor10Minute_WhenRequesting10UnavailableToken_NoAccumulated() throws InterruptedException {
+    public void testMethod_SleepsFor10Minute_WhenRequesting10UnavailableToken_NoAccumulated() throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
         when(clock.instant()).thenReturn(now);
@@ -160,11 +169,11 @@ public void testAcquire_SleepsFor10Minute_WhenRequesting10UnavailableToken_NoAcc
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         var limiter = new RateLimiter(0, 1, TimeUnit.MINUTES, sleeper, clock);
-        limiter.acquire(10);
-        verify(sleeper, times(1)).sleep(TimeUnit.MINUTES.toMicros(10));
+        var res = tokenMethod(limiter, 10);
+        sleepValidationMethod(res, sleeper, 1, TimeUnit.MINUTES.toMicros(10));
     }
 
-    public void testAcquire_IncrementsNextTokenAvailabilityInstant_ByOneMinute() throws InterruptedException {
+    public void testMethod_IncrementsNextTokenAvailabilityInstant_ByOneMinute() throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
         when(clock.instant()).thenReturn(now);
@@ -172,12 +181,12 @@ public void testAcquire_IncrementsNextTokenAvailabilityInstant_ByOneMinute() thr
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         var limiter = new RateLimiter(0, 1, TimeUnit.MINUTES, sleeper, clock);
-        limiter.acquire(1);
-        verify(sleeper, times(1)).sleep(TimeUnit.MINUTES.toMicros(1));
+        var res = tokenMethod(limiter, 1);
+        sleepValidationMethod(res, sleeper, 1, TimeUnit.MINUTES.toMicros(1));
         assertThat(limiter.getNextTokenAvailability(), is(now.plus(1, ChronoUnit.MINUTES)));
     }
 
-    public void testAcquire_SecondCallToAcquire_ShouldWait_WhenAccumulatedTokensAreDepleted() throws InterruptedException {
+    public void testMethod_SecondCallToAcquire_ShouldWait_WhenAccumulatedTokensAreDepleted() throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
         when(clock.instant()).thenReturn(now);
@@ -185,13 +194,14 @@ public void testAcquire_SecondCallToAcquire_ShouldWait_WhenAccumulatedTokensAreD
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         var limiter = new RateLimiter(1, 1, TimeUnit.MINUTES, sleeper, clock);
-        limiter.acquire(1);
-        verify(sleeper, times(1)).sleep(0);
-        limiter.acquire(1);
-        verify(sleeper, times(1)).sleep(TimeUnit.MINUTES.toMicros(1));
+
+        var res = tokenMethod(limiter, 1);
+        sleepValidationMethod(res, sleeper, 1, 0);
+        res = tokenMethod(limiter, 1);
+        sleepValidationMethod(res, sleeper, 1, TimeUnit.MINUTES.toMicros(1));
     }
 
-    public void testAcquire_SecondCallToAcquire_ShouldWaitForHalfDuration_WhenElapsedTimeIsHalfRequiredDuration()
+    public void testMethod_SecondCallToAcquire_ShouldWaitForHalfDuration_WhenElapsedTimeIsHalfRequiredDuration()
         throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
@@ -200,14 +210,15 @@ public void testAcquire_SecondCallToAcquire_ShouldWaitForHalfDuration_WhenElapse
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         var limiter = new RateLimiter(1, 1, TimeUnit.MINUTES, sleeper, clock);
-        limiter.acquire(1);
-        verify(sleeper, times(1)).sleep(0);
+
+        var res = tokenMethod(limiter, 1);
+        sleepValidationMethod(res, sleeper, 1, 0);
 
         when(clock.instant()).thenReturn(now.plus(Duration.ofSeconds(30)));
-        limiter.acquire(1);
-        verify(sleeper, times(1)).sleep(TimeUnit.SECONDS.toMicros(30));
+        res = tokenMethod(limiter, 1);
+        sleepValidationMethod(res, sleeper, 1, TimeUnit.SECONDS.toMicros(30));
     }
 
-    public void testAcquire_ShouldAccumulateTokens() throws InterruptedException {
+    public void testMethod_ShouldAccumulateTokens() throws InterruptedException {
         var now = Clock.systemUTC().instant();
         var clock = mock(Clock.class);
         when(clock.instant()).thenReturn(now);
@@ -215,11 +226,12 @@ public void testAcquire_ShouldAccumulateTokens() throws InterruptedException {
         var sleeper = mock(RateLimiter.Sleeper.class);
 
         var limiter = new RateLimiter(10, 10, TimeUnit.MINUTES, sleeper, clock);
-        limiter.acquire(5);
-        verify(sleeper, times(1)).sleep(0);
+
+        var res = tokenMethod(limiter, 5);
+        sleepValidationMethod(res, sleeper, 1, 0);
 
         // it should accumulate 5 tokens
         when(clock.instant()).thenReturn(now.plus(Duration.ofSeconds(30)));
-        limiter.acquire(10);
-        verify(sleeper, times(2)).sleep(0);
+        res = tokenMethod(limiter, 10);
+        sleepValidationMethod(res, sleeper, 2, 0);
     }
 }
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterAcquireTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterAcquireTests.java
new file mode 100644
index 0000000000000..1f59fa7bb5bad
--- /dev/null
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterAcquireTests.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.common;
+
+import org.elasticsearch.core.TimeValue;
+
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+public class RateLimiterAcquireTests extends BaseRateLimiterTests {
+
+    @Override
+    protected TimeValue tokenMethod(RateLimiter limiter, int tokens) throws InterruptedException {
+        limiter.acquire(tokens);
+        return null;
+    }
+
+    @Override
+    protected void sleepValidationMethod(
+        TimeValue result,
+        RateLimiter.Sleeper mockSleeper,
+        int numberOfCallsToExpect,
+        long expectedMicrosecondsToSleep
+    ) throws InterruptedException {
+        verify(mockSleeper, times(numberOfCallsToExpect)).sleep(expectedMicrosecondsToSleep);
+    }
+}
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterReserveTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterReserveTests.java
new file mode 100644
index 0000000000000..5c32c6c560e7b
--- /dev/null
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterReserveTests.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.common;
+
+import org.elasticsearch.core.TimeValue;
+
+import static org.hamcrest.Matchers.is;
+
+public class RateLimiterReserveTests extends BaseRateLimiterTests {
+
+    @Override
+    protected TimeValue tokenMethod(RateLimiter limiter, int tokens) {
+        return limiter.reserve(tokens);
+    }
+
+    @Override
+    protected void sleepValidationMethod(
+        TimeValue result,
+        RateLimiter.Sleeper mockSleeper,
+        int numberOfCallsToExpect,
+        long expectedMicrosecondsToSleep
+    ) {
+        assertThat(result.getMicros(), is(expectedMicrosecondsToSleep));
+    }
+}
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterTimeToReserveTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterTimeToReserveTests.java
new file mode 100644
index 0000000000000..a69846d67c4e3
--- /dev/null
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/RateLimiterTimeToReserveTests.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.common;
+
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.test.ESTestCase;
+
+import java.time.Clock;
+import java.time.Duration;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.is;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class RateLimiterTimeToReserveTests extends ESTestCase {
+    public void testTimeToReserve_Returns_1Second() {
+        var now = Clock.systemUTC().instant();
+        var clock = mock(Clock.class);
+        when(clock.instant()).thenReturn(now);
+
+        var sleeper = mock(RateLimiter.Sleeper.class);
+
+        var limiter = new RateLimiter(0, 1, TimeUnit.SECONDS, sleeper, clock);
+        var timeToWait = limiter.timeToReserve(1);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(1)));
+    }
+
+    public void testTimeToReserve_Returns_1Second_WithoutReservingToken() {
+        var now = Clock.systemUTC().instant();
+        var clock = mock(Clock.class);
+        when(clock.instant()).thenReturn(now);
+
+        var sleeper = mock(RateLimiter.Sleeper.class);
+
+        var limiter = new RateLimiter(0, 1, TimeUnit.SECONDS, sleeper, clock);
+        var timeToWait = limiter.timeToReserve(1);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(1)));
+
+        timeToWait = limiter.timeToReserve(1);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(1)));
+    }
+
+    public void testTimeToReserve_Returns_0Seconds_WhenTokenIsAlreadyAvailable() {
+        var now = Clock.systemUTC().instant();
+        var clock = mock(Clock.class);
+        when(clock.instant()).thenReturn(now);
+
+        var sleeper = mock(RateLimiter.Sleeper.class);
+
+        var limiter = new RateLimiter(1, 1, TimeUnit.SECONDS, sleeper, clock);
+        var timeToWait = limiter.timeToReserve(1);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(0)));
+    }
+
+    public void testTimeToReserve_Returns_0Seconds_WhenTokenIsAlreadyAvailable_WithoutReservingToken() {
+        var now = Clock.systemUTC().instant();
+        var clock = mock(Clock.class);
+        when(clock.instant()).thenReturn(now);
+
+        var sleeper = mock(RateLimiter.Sleeper.class);
+
+        var limiter = new RateLimiter(1, 1, TimeUnit.SECONDS, sleeper, clock);
+        var timeToWait = limiter.timeToReserve(1);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(0)));
+
+        timeToWait = limiter.timeToReserve(1);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(0)));
+    }
+
+    public void testTimeToReserve_Returns_1Second_When1TokenIsAlreadyAvailable_ButRequires2Tokens() {
+        var now = Clock.systemUTC().instant();
+        var clock = mock(Clock.class);
+        when(clock.instant()).thenReturn(now);
+
+        var sleeper = mock(RateLimiter.Sleeper.class);
+
+        var limiter = new RateLimiter(1, 1, TimeUnit.SECONDS, sleeper, clock);
+        var timeToWait = limiter.timeToReserve(2);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(1)));
+    }
+
+    public void testTimeToReserve_Returns_1Second_When1TokenIsAlreadyAvailable_ButRequires2Tokens_WithoutReservingToken() {
+        var now = Clock.systemUTC().instant();
+        var clock = mock(Clock.class);
+        when(clock.instant()).thenReturn(now);
+
+        var sleeper = mock(RateLimiter.Sleeper.class);
+
+        var limiter = new RateLimiter(1, 1, TimeUnit.SECONDS, sleeper, clock);
+        var timeToWait = limiter.timeToReserve(2);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(1)));
+
+        timeToWait = limiter.timeToReserve(2);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(1)));
+    }
+
+    public void testTimeToReserve_Returns_0Seconds_WhenTimeAdvancesToAccumulate2Tokens() {
+        var now = Clock.systemUTC().instant();
+        var clock = mock(Clock.class);
+        when(clock.instant()).thenReturn(now);
+
+        var sleeper = mock(RateLimiter.Sleeper.class);
+
+        var limiter = new RateLimiter(2, 1, TimeUnit.SECONDS, sleeper, clock);
+        // drain the accumulated tokens
+        var drainedTokensTime = limiter.reserve(2);
+        assertThat(drainedTokensTime, is(TimeValue.timeValueSeconds(0)));
+
+        when(clock.instant()).thenReturn(now.plus(Duration.ofSeconds(2)));
+        // 2 tokens should now be available
+        var timeToWait = limiter.timeToReserve(2);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(0)));
+    }
+
+    public void testTimeToReserve_Returns_0Seconds_WhenTimeAdvancesToAccumulate2Tokens_MethodCallDoesNotReserveTokens() {
+        var now = Clock.systemUTC().instant();
+        var clock = mock(Clock.class);
+        when(clock.instant()).thenReturn(now);
+
+        var sleeper = mock(RateLimiter.Sleeper.class);
+
+        var limiter = new RateLimiter(2, 1, TimeUnit.SECONDS, sleeper, clock);
+        // drain the accumulated tokens
+        var drainedTokensTime = limiter.reserve(2);
+        assertThat(drainedTokensTime, is(TimeValue.timeValueSeconds(0)));
+
+        when(clock.instant()).thenReturn(now.plus(Duration.ofSeconds(2)));
+        // 2 tokens should now be available
+        var timeToWait = limiter.timeToReserve(2);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(0)));
+
+        // 2 tokens should still be available
+        timeToWait = limiter.timeToReserve(2);
+        assertThat(timeToWait, is(TimeValue.timeValueSeconds(0)));
+    }
+}
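Reviewer note: a minimal sketch of how the two new inference primitives might compose at a call site. This is illustrative only and not part of the change; RateLimitedPoller, executeTask, and the wiring are invented names. The idea is that a caller can peek the queue, use the advisory timeToReserve to decide whether to run now or reschedule, and only reserve tokens once it commits to polling a task.

import java.util.function.Consumer;

import org.elasticsearch.core.TimeValue;

class RateLimitedPoller<T> {
    private final AdjustableCapacityBlockingQueue<T> queue;
    private final RateLimiter rateLimiter;

    RateLimitedPoller(AdjustableCapacityBlockingQueue<T> queue, RateLimiter rateLimiter) {
        this.queue = queue;
        this.rateLimiter = rateLimiter;
    }

    // Runs at most one queued task, consuming rate-limit tokens only when a task is actually queued.
    void pollOnce(Consumer<T> executeTask) throws InterruptedException {
        if (queue.peek() == null) {
            return; // nothing to do, and no tokens were consumed by the check
        }
        // timeToReserve is advisory (nothing is reserved yet)...
        TimeValue advisoryWait = rateLimiter.timeToReserve(1);
        if (advisoryWait.nanos() > 0) {
            Thread.sleep(advisoryWait.millis()); // a real caller would likely reschedule instead of blocking
        }
        // ...so the tokens are only debited here, once we commit to running the task
        rateLimiter.reserve(1);
        T task = queue.poll(); // may be null if another thread drained the queue meanwhile
        if (task != null) {
            executeTask.accept(task);
        }
    }
}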