diff --git a/docs/build.gradle b/docs/build.gradle
index 08cb2de971320..d13f4ca3b2edb 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -1146,3 +1146,42 @@ buildRestTests.setups['kibana_sample_data_ecommerce'] = '''
         number_of_shards: 1
         number_of_replicas: 0
 '''
+buildRestTests.setups['setup_logdata'] = '''
+  - do:
+      indices.create:
+        index: logdata
+        body:
+          settings:
+            number_of_shards: 1
+            number_of_replicas: 1
+          mappings:
+            properties:
+              grade:
+                type: byte
+  - do:
+      bulk:
+        index: logdata
+        refresh: true
+        body: |
+          {"index":{}}
+          {"grade": 100, "weight": 2}
+          {"index":{}}
+          {"grade": 50, "weight": 3}
+'''
+buildRestTests.setups['logdata_job'] = buildRestTests.setups['setup_logdata'] + '''
+  - do:
+      ml.put_data_frame_analytics:
+        id: "loganalytics"
+        body: >
+          {
+            "source": {
+              "index": "logdata"
+            },
+            "dest": {
+              "index": "logdata_out"
+            },
+            "analysis": {
+              "outlier_detection": {}
+            }
+          }
+'''
diff --git a/docs/reference/data-frames/apis/preview-transform.asciidoc b/docs/reference/data-frames/apis/preview-transform.asciidoc
index a4338d0ef60b3..42e01e557ddd3 100644
--- a/docs/reference/data-frames/apis/preview-transform.asciidoc
+++ b/docs/reference/data-frames/apis/preview-transform.asciidoc
@@ -30,6 +30,15 @@ Previews a {dataframe-transform}.
 {stack-ov}/security-privileges.html[Security privileges] and
 {stack-ov}/built-in-roles.html[Built-in roles].
 
+[discrete]
+[[preview-data-frame-transform-desc]]
+==== {api-description-title}
+
+This API generates a preview of the results that you will get when you run the
+<> with the same
+configuration. It returns a maximum of 100 results. The calculations are based
+on all the current data in the source index.
+
 [discrete]
 [[preview-data-frame-transform-request-body]]
 ==== {api-request-body-title}
@@ -40,6 +49,15 @@ Previews a {dataframe-transform}.
 `pivot` (Required)::
   (object) Defines the pivot function `group by` fields and the aggregation to
   reduce the data. See <>.
+
+[discrete]
+[[preview-data-frame-transform-response]]
+==== {api-response-body-title}
+
+`preview`::
+  (array) An array of documents. These are the JSON representations of the
+  documents that would be created in the destination index by the
+  {dataframe-transform}.
 
 [discrete]
 ==== {api-examples-title}
diff --git a/docs/reference/ml/apis/delete-dfanalytics.asciidoc b/docs/reference/ml/apis/delete-dfanalytics.asciidoc
new file mode 100644
index 0000000000000..9904cf1fa49dc
--- /dev/null
+++ b/docs/reference/ml/apis/delete-dfanalytics.asciidoc
@@ -0,0 +1,52 @@
+[role="xpack"]
+[testenv="platinum"]
+[[delete-dfanalytics]]
+=== Delete {dfanalytics-jobs} API
+[subs="attributes"]
+++++
+Delete {dfanalytics-jobs}
+++++
+
+experimental[]
+
+Deletes an existing {dfanalytics-job}.
+
+[[ml-delete-dfanalytics-request]]
+==== {api-request-title}
+
+`DELETE _ml/data_frame/analytics/<data_frame_analytics_id>`
+
+[[ml-delete-dfanalytics-prereq]]
+==== {api-prereq-title}
+
+* You must have the `machine_learning_admin` built-in role to use this API. For
+more information, see {stack-ov}/security-privileges.html[Security privileges]
+and {stack-ov}/built-in-roles.html[Built-in roles].
+
+[[ml-delete-dfanalytics-path-params]]
+==== {api-path-parms-title}
+
+`<data_frame_analytics_id>` (Required)::
+  (string) Identifier for the {dfanalytics-job} you want to delete.
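If the identifier does not match an existing {dfanalytics-job}, the request
fails. As a sketch only, assuming the standard {es} error envelope (this page
does not document the exact message, so the `reason` text is hypothetical),
the failure would look roughly like:

[source,js]
----
{
  "error": {
    "root_cause": [
      {
        "type": "resource_not_found_exception",
        "reason": "No known data frame analytics with id [loganalytics]" <1>
      }
    ],
    "type": "resource_not_found_exception",
    "reason": "No known data frame analytics with id [loganalytics]"
  },
  "status": 404
}
----
// NOTCONSOLE
<1> The `reason` text shown here is illustrative, not taken from this API.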
+
+[[ml-delete-dfanalytics-example]]
+==== {api-examples-title}
+
+The following example deletes the `loganalytics` {dfanalytics-job}:
+
+[source,js]
+--------------------------------------------------
+DELETE _ml/data_frame/analytics/loganalytics
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:TBD]
+
+The API returns the following result:
+
+[source,js]
+----
+{
+  "acknowledged" : true
+}
+----
+// TESTRESPONSE
\ No newline at end of file
diff --git a/docs/reference/ml/apis/evaluate-dfanalytics.asciidoc b/docs/reference/ml/apis/evaluate-dfanalytics.asciidoc
new file mode 100644
index 0000000000000..9c779f939e201
--- /dev/null
+++ b/docs/reference/ml/apis/evaluate-dfanalytics.asciidoc
@@ -0,0 +1,105 @@
+[role="xpack"]
+[testenv="platinum"]
+[[evaluate-dfanalytics]]
+=== Evaluate {dfanalytics} API
+
+[subs="attributes"]
+++++
+Evaluate {dfanalytics}
+++++
+
+experimental[]
+
+Evaluates the results of an analysis on an index that is already annotated
+with a field that contains the outcome of the analysis (the `ground truth`)
+for each {dataframe} row. Evaluation is typically done by calculating a set of
+metrics that capture various aspects of the quality of the results over the
+data for which the `ground truth` is known. Different metrics are suitable for
+different types of analyses. This API packages together commonly used metrics
+for various analyses.
+
+[[ml-evaluate-dfanalytics-request]]
+==== {api-request-title}
+
+`POST _ml/data_frame/_evaluate`
+
+[[ml-evaluate-dfanalytics-prereq]]
+==== {api-prereq-title}
+
+* You must have the `monitor_ml` privilege to use this API. For more
+information, see {stack-ov}/security-privileges.html[Security privileges] and
+{stack-ov}/built-in-roles.html[Built-in roles].
+
+[[ml-evaluate-dfanalytics-request-body]]
+==== {api-request-body-title}
+
+`index` (Required)::
+  (object) Defines the `index` in which the evaluation will be performed.
+
+`evaluation` (Required)::
+  (object) Defines the type of evaluation you want to perform. For example:
+  `binary_soft_classification`. See Evaluate API resources.
+
+[[ml-evaluate-dfanalytics-example]]
+==== {api-examples-title}
+
+[source,js]
+--------------------------------------------------
+POST _ml/data_frame/_evaluate
+{
+  "index": "my_analytics_dest_index",
+  "evaluation": {
+    "binary_soft_classification": {
+      "actual_field": "is_outlier",
+      "predicted_probability_field": "ml.outlier_score"
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:TBD]
+
+The API returns the following results:
+
+[source,js]
+----
+{
+  "binary_soft_classification": {
+    "auc_roc": {
+      "score": 0.92584757746414444
+    },
+    "confusion_matrix": {
+      "0.25": {
+        "tp": 5,
+        "fp": 9,
+        "tn": 204,
+        "fn": 5
+      },
+      "0.5": {
+        "tp": 1,
+        "fp": 5,
+        "tn": 208,
+        "fn": 9
+      },
+      "0.75": {
+        "tp": 0,
+        "fp": 4,
+        "tn": 209,
+        "fn": 10
+      }
+    },
+    "precision": {
+      "0.25": 0.35714285714285715,
+      "0.5": 0.16666666666666666,
+      "0.75": 0
+    },
+    "recall": {
+      "0.25": 0.5,
+      "0.5": 0.1,
+      "0.75": 0
+    }
+  }
+}
+----
+// TESTRESPONSE
\ No newline at end of file
diff --git a/docs/reference/ml/apis/get-dfanalytics-stats.asciidoc b/docs/reference/ml/apis/get-dfanalytics-stats.asciidoc
new file mode 100644
index 0000000000000..01014a11e4d9e
--- /dev/null
+++ b/docs/reference/ml/apis/get-dfanalytics-stats.asciidoc
@@ -0,0 +1,91 @@
+[role="xpack"]
+[testenv="platinum"]
+[[get-dfanalytics-stats]]
+=== Get {dfanalytics-jobs} statistics API
+[subs="attributes"]
+++++
+Get {dfanalytics-jobs} stats
+++++
+
+experimental[]
+
+Retrieves usage information for {dfanalytics-jobs}.
+
+[[ml-get-dfanalytics-stats-request]]
+==== {api-request-title}
+
+`GET _ml/data_frame/analytics/<data_frame_analytics_id>/_stats` +
+`GET _ml/data_frame/analytics/<data_frame_analytics_id>,<data_frame_analytics_id>/_stats` +
+`GET _ml/data_frame/analytics/_stats` +
+`GET _ml/data_frame/analytics/_all/_stats` +
+`GET _ml/data_frame/analytics/*/_stats`
+
+[[ml-get-dfanalytics-stats-prereq]]
+==== {api-prereq-title}
+
+* You must have the `monitor_ml` privilege to use this API. For more
+information, see {stack-ov}/security-privileges.html[Security privileges] and
+{stack-ov}/built-in-roles.html[Built-in roles].
+
+[[ml-get-dfanalytics-stats-path-params]]
+==== {api-path-parms-title}
+
+`<data_frame_analytics_id>` (Optional)::
+  (string) Identifier for the {dfanalytics-job}. If you do not specify one of
+  these options, the API returns information for the first hundred
+  {dfanalytics-jobs}.
+
+`allow_no_match` (Optional)::
+  (boolean) If `false` and the `data_frame_analytics_id` does not match any
+  {dfanalytics-job}, an error is returned. The default value is `true`.
+
+[[ml-get-dfanalytics-stats-query-params]]
+==== {api-query-parms-title}
+
+`from` (Optional)::
+  (integer) Skips the specified number of {dfanalytics-jobs}. The default value
+  is `0`.
+
+`size` (Optional)::
+  (integer) Specifies the maximum number of {dfanalytics-jobs} to obtain. The
+  default value is `100`.
+
+[discrete]
+[[ml-get-dfanalytics-stats-response-body]]
+==== {api-response-body-title}
+
+The API returns the following information:
+
+`data_frame_analytics`::
+  (array) An array of statistics objects for {dfanalytics-jobs}, which are
+  sorted by the `id` value in ascending order.
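Combining the `_all` request form with the `from` and `size` query parameters
above, a sketch of a request that skips the first ten {dfanalytics-jobs} and
returns statistics for the next ten:

[source,js]
--------------------------------------------------
GET _ml/data_frame/analytics/_all/_stats?from=10&size=10
--------------------------------------------------
// CONSOLE
// TEST[skip:TBD]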
+
+[[ml-get-dfanalytics-stats-example]]
+==== {api-examples-title}
+
+[source,js]
+--------------------------------------------------
+GET _ml/data_frame/analytics/loganalytics/_stats
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:TBD]
+
+The API returns the following results:
+
+[source,js]
+----
+{
+  "count": 1,
+  "data_frame_analytics": [
+    {
+      "id": "loganalytics",
+      "state": "stopped"
+    }
+  ]
+}
+----
+// TESTRESPONSE
diff --git a/docs/reference/ml/apis/get-dfanalytics.asciidoc b/docs/reference/ml/apis/get-dfanalytics.asciidoc
new file mode 100644
index 0000000000000..edf14060cad12
--- /dev/null
+++ b/docs/reference/ml/apis/get-dfanalytics.asciidoc
@@ -0,0 +1,106 @@
+[role="xpack"]
+[testenv="platinum"]
+[[get-dfanalytics]]
+=== Get {dfanalytics-jobs} API
+[subs="attributes"]
+++++
+Get {dfanalytics-jobs}
+++++
+
+experimental[]
+
+Retrieves configuration information for {dfanalytics-jobs}.
+
+[[ml-get-dfanalytics-request]]
+==== {api-request-title}
+
+`GET _ml/data_frame/analytics/<data_frame_analytics_id>` +
+`GET _ml/data_frame/analytics/<data_frame_analytics_id>,<data_frame_analytics_id>` +
+`GET _ml/data_frame/analytics/` +
+`GET _ml/data_frame/analytics/_all`
+
+[[ml-get-dfanalytics-prereq]]
+==== {api-prereq-title}
+
+* You must have the `monitor_ml` privilege to use this API. For more
+information, see {stack-ov}/security-privileges.html[Security privileges] and
+{stack-ov}/built-in-roles.html[Built-in roles].
+
+[[ml-get-dfanalytics-desc]]
+==== {api-description-title}
+
+You can get information for multiple {dfanalytics-jobs} in a single API request
+by using a comma-separated list of {dfanalytics-jobs} or a wildcard expression.
+You can get information for all {dfanalytics-jobs} by using `_all`, by
+specifying `*` as the `<data_frame_analytics_id>`, or by omitting the
+`<data_frame_analytics_id>`.
+
+[[ml-get-dfanalytics-path-params]]
+==== {api-path-parms-title}
+
+`<data_frame_analytics_id>` (Optional)::
+  (string) Identifier for the {dfanalytics-job}. If you do not specify one of
+  these options, the API returns information for the first hundred
+  {dfanalytics-jobs}.
+
+`allow_no_match` (Optional)::
+  (boolean) If `false` and the `data_frame_analytics_id` does not match any
+  {dfanalytics-job}, an error is returned. The default value is `true`.
+
+[[ml-get-dfanalytics-query-params]]
+==== {api-query-parms-title}
+
+`from` (Optional)::
+  (integer) Skips the specified number of {dfanalytics-jobs}. The default value
+  is `0`.
+
+`size` (Optional)::
+  (integer) Specifies the maximum number of {dfanalytics-jobs} to obtain. The
+  default value is `100`.
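As the description above notes, a single request can address several jobs at
once. A sketch using a wildcard expression together with `size` (the
`loganalytics*` pattern is illustrative):

[source,js]
--------------------------------------------------
GET _ml/data_frame/analytics/loganalytics*?size=50
--------------------------------------------------
// CONSOLE
// TEST[skip:TBD]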
+
+[[ml-get-dfanalytics-example]]
+==== {api-examples-title}
+
+The following example gets configuration information for the `loganalytics`
+{dfanalytics-job}:
+
+[source,js]
+--------------------------------------------------
+GET _ml/data_frame/analytics/loganalytics
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:TBD]
+
+The API returns the following results:
+
+[source,js]
+----
+{
+  "count": 1,
+  "data_frame_analytics": [
+    {
+      "id": "loganalytics",
+      "source": {
+        "index": "logdata",
+        "query": {
+          "match_all": {}
+        }
+      },
+      "dest": {
+        "index": "logdata_out",
+        "results_field": "ml"
+      },
+      "analysis": {
+        "outlier_detection": {}
+      },
+      "model_memory_limit": "1gb",
+      "create_time": 1562265491319,
+      "version" : "8.0.0"
+    }
+  ]
+}
+----
+// TESTRESPONSE
diff --git a/docs/reference/ml/apis/ml-api.asciidoc b/docs/reference/ml/apis/ml-api.asciidoc
index 7933dea85ce0a..54a7b4e60c198 100644
--- a/docs/reference/ml/apis/ml-api.asciidoc
+++ b/docs/reference/ml/apis/ml-api.asciidoc
@@ -34,7 +34,16 @@ machine learning APIs and in advanced job configuration options in Kibana.
 * <>
 * <>
 
+[float]
+[[ml-api-dfanalytics-endpoint]]
+=== {dfanalytics-cap} APIs
+* <> or
+<>
+* <> or
+<>
+* <> or <>
+* <>
 
 [float]
 [[ml-api-job-endpoint]]
@@ -104,6 +113,7 @@ include::put-calendar.asciidoc[]
 include::put-datafeed.asciidoc[]
 include::put-filter.asciidoc[]
 include::put-job.asciidoc[]
+include::put-dfanalytics.asciidoc[]
 //DELETE
 include::delete-calendar.asciidoc[]
 include::delete-datafeed.asciidoc[]
@@ -114,6 +124,9 @@ include::delete-job.asciidoc[]
 include::delete-calendar-job.asciidoc[]
 include::delete-snapshot.asciidoc[]
 include::delete-expired-data.asciidoc[]
+include::delete-dfanalytics.asciidoc[]
+//EVALUATE
+include::evaluate-dfanalytics.asciidoc[]
 //FIND
 include::find-file-structure.asciidoc[]
 //FLUSH
@@ -135,6 +148,8 @@ include::get-snapshot.asciidoc[]
 include::get-calendar-event.asciidoc[]
 include::get-filter.asciidoc[]
 include::get-record.asciidoc[]
+include::get-dfanalytics.asciidoc[]
+include::get-dfanalytics-stats.asciidoc[]
 //OPEN
 include::open-job.asciidoc[]
 //POST
@@ -146,7 +161,9 @@ include::revert-snapshot.asciidoc[]
 //SET/START/STOP
 include::set-upgrade-mode.asciidoc[]
 include::start-datafeed.asciidoc[]
+include::start-dfanalytics.asciidoc[]
 include::stop-datafeed.asciidoc[]
+include::stop-dfanalytics.asciidoc[]
 //UPDATE
 include::update-datafeed.asciidoc[]
 include::update-filter.asciidoc[]
diff --git a/docs/reference/ml/apis/put-dfanalytics.asciidoc b/docs/reference/ml/apis/put-dfanalytics.asciidoc
new file mode 100644
index 0000000000000..8499950c2fb17
--- /dev/null
+++ b/docs/reference/ml/apis/put-dfanalytics.asciidoc
@@ -0,0 +1,128 @@
+[role="xpack"]
+[testenv="platinum"]
+[[put-dfanalytics]]
+=== Create {dfanalytics-jobs} API
+[subs="attributes"]
+++++
+Create {dfanalytics-jobs}
+++++
+
+experimental[]
+
+Instantiates a {dfanalytics-job}.
+
+[[ml-put-dfanalytics-request]]
+==== {api-request-title}
+
+`PUT _ml/data_frame/analytics/<data_frame_analytics_id>`
+
+[[ml-put-dfanalytics-prereq]]
+==== {api-prereq-title}
+
+* You must have the `machine_learning_admin` built-in role to use this API. You
+must also have `read` and `view_index_metadata` privileges on the source index
+and `read`, `create_index`, and `index` privileges on the destination index.
+For more information, see {stack-ov}/security-privileges.html[Security
+privileges] and {stack-ov}/built-in-roles.html[Built-in roles].
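The index privileges listed above can also be granted through a custom role
rather than a built-in one. A sketch using the {es} security API; the role
name is illustrative, and the {ml} side still requires the
`machine_learning_admin` role:

[source,js]
----
PUT _security/role/df_analytics_logdata
{
  "indices": [
    {
      "names": [ "logdata" ],
      "privileges": [ "read", "view_index_metadata" ]
    },
    {
      "names": [ "logdata_out" ],
      "privileges": [ "read", "create_index", "index" ]
    }
  ]
}
----
// NOTCONSOLE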
+
+[[ml-put-dfanalytics-desc]]
+==== {api-description-title}
+
+This API creates a {dfanalytics-job} that performs an analysis on the source
+index and stores the outcome in a destination index.
+
+The destination index is created automatically if it does not exist. The
+`index.number_of_shards` and `index.number_of_replicas` settings of the source
+index are copied over to the destination index. When the source index matches
+multiple indices, these settings are set to the maximum values found in the
+source indices.
+
+The API also attempts to copy the mappings of the source indices to the
+destination index; however, if the mappings of any of the fields differ among
+the source indices, the attempt fails with an error message.
+
+If the destination index already exists, it is used as is. This makes it
+possible to set up the destination index in advance with custom settings and
+mappings.
+
+[[ml-put-dfanalytics-path-params]]
+==== {api-path-parms-title}
+
+`<data_frame_analytics_id>` (Required)::
+  (string) Identifier for the {dfanalytics-job}. This identifier can contain
+  lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It
+  must start and end with alphanumeric characters.
+
+[[ml-put-dfanalytics-request-body]]
+==== {api-request-body-title}
+
+`source` (Required)::
+  (object) The source configuration, consisting of `index` and optionally a
+  `query`.
+
+`dest` (Required)::
+  (object) The destination configuration, consisting of `index` and optionally
+  `results_field` (`ml` by default).
+
+`analysis` (Required)::
+  (object) Defines the type of {dfanalytics} you want to perform on your source
+  index. For example: `outlier_detection`. See {oldetection} resources.
+
+`analyzed_fields` (Optional)::
+  (object) You can specify `includes` and/or `excludes` patterns. If
+  `analyzed_fields` is not set, only the relevant fields are included; for
+  example, all the numeric fields for {oldetection}.
+
+[[ml-put-dfanalytics-example]]
+==== {api-examples-title}
+
+The following example creates the `loganalytics` {dfanalytics-job}; the
+analysis type is `outlier_detection`:
+
+[source,js]
+--------------------------------------------------
+PUT _ml/data_frame/analytics/loganalytics
+{
+  "source": {
+    "index": "logdata"
+  },
+  "dest": {
+    "index": "logdata_out"
+  },
+  "analysis": {
+    "outlier_detection": {
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:setup_logdata]
+
+The API returns the following result:
+
+[source,js]
+----
+{
+  "id": "loganalytics",
+  "source": {
+    "index": ["logdata"],
+    "query": {
+      "match_all": {}
+    }
+  },
+  "dest": {
+    "index": "logdata_out",
+    "results_field": "ml"
+  },
+  "analysis": {
+    "outlier_detection": {}
+  },
+  "model_memory_limit": "1gb",
+  "create_time" : 1562265491319,
+  "version" : "8.0.0"
+}
+----
+// TESTRESPONSE[s/1562265491319/$body.$_path/]
+// TESTRESPONSE[s/"version": "8.0.0"/"version": $body.version/]
\ No newline at end of file
diff --git a/docs/reference/ml/apis/start-dfanalytics.asciidoc b/docs/reference/ml/apis/start-dfanalytics.asciidoc
new file mode 100644
index 0000000000000..4b2c774ae3b26
--- /dev/null
+++ b/docs/reference/ml/apis/start-dfanalytics.asciidoc
@@ -0,0 +1,61 @@
+[role="xpack"]
+[testenv="platinum"]
+[[start-dfanalytics]]
+=== Start {dfanalytics-jobs} API
+
+[subs="attributes"]
+++++
+Start {dfanalytics-jobs}
+++++
+
+experimental[]
+
+Starts a {dfanalytics-job}.
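In a typical workflow this call immediately follows the create
{dfanalytics-jobs} API documented above. A sketch chaining the two requests,
reusing the `loganalytics` configuration from that section:

[source,js]
----
PUT _ml/data_frame/analytics/loganalytics
{
  "source":   { "index": "logdata" },
  "dest":     { "index": "logdata_out" },
  "analysis": { "outlier_detection": {} }
}

POST _ml/data_frame/analytics/loganalytics/_start
----
// NOTCONSOLE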
+
+[[ml-start-dfanalytics-request]]
+==== {api-request-title}
+
+`POST _ml/data_frame/analytics/<data_frame_analytics_id>/_start`
+
+[[ml-start-dfanalytics-prereq]]
+==== {api-prereq-title}
+
+* You must have the `machine_learning_admin` built-in role to use this API. You
+must also have `read` and `view_index_metadata` privileges on the source index
+and `read`, `create_index`, and `index` privileges on the destination index.
+For more information, see {stack-ov}/security-privileges.html[Security
+privileges] and {stack-ov}/built-in-roles.html[Built-in roles].
+
+[[ml-start-dfanalytics-path-params]]
+==== {api-path-parms-title}
+
+`<data_frame_analytics_id>` (Required)::
+  (string) Identifier for the {dfanalytics-job}. This identifier can contain
+  lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It
+  must start and end with alphanumeric characters.
+
+`timeout` (Optional)::
+  (time) Controls the amount of time to wait until the {dfanalytics-job}
+  starts. The default value is 20 seconds.
+
+[[ml-start-dfanalytics-example]]
+==== {api-examples-title}
+
+The following example starts the `loganalytics` {dfanalytics-job}:
+
+[source,js]
+--------------------------------------------------
+POST _ml/data_frame/analytics/loganalytics/_start
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:setup:logdata_job]
+
+When the {dfanalytics-job} starts, you receive the following results:
+
+[source,js]
+----
+{
+  "acknowledged" : true
+}
+----
+// TESTRESPONSE
\ No newline at end of file
diff --git a/docs/reference/ml/apis/stop-dfanalytics.asciidoc b/docs/reference/ml/apis/stop-dfanalytics.asciidoc
new file mode 100644
index 0000000000000..70b1d8454f8e1
--- /dev/null
+++ b/docs/reference/ml/apis/stop-dfanalytics.asciidoc
@@ -0,0 +1,81 @@
+[role="xpack"]
+[testenv="platinum"]
+[[stop-dfanalytics]]
+=== Stop {dfanalytics-jobs} API
+
+[subs="attributes"]
+++++
+Stop {dfanalytics-jobs}
+++++
+
+experimental[]
+
+Stops one or more {dfanalytics-jobs}.
+
+[[ml-stop-dfanalytics-request]]
+==== {api-request-title}
+
+`POST _ml/data_frame/analytics/<data_frame_analytics_id>/_stop` +
+`POST _ml/data_frame/analytics/<data_frame_analytics_id>,<data_frame_analytics_id>/_stop` +
+`POST _ml/data_frame/analytics/_all/_stop`
+
+[[ml-stop-dfanalytics-prereq]]
+==== {api-prereq-title}
+
+* You must have the `machine_learning_admin` built-in role to use this API. For
+more information, see {stack-ov}/security-privileges.html[Security privileges]
+and {stack-ov}/built-in-roles.html[Built-in roles].
+
+[[ml-stop-dfanalytics-desc]]
+==== {api-description-title}
+
+A {dfanalytics-job} can be started and stopped multiple times throughout its
+lifecycle.
+
+You can stop multiple {dfanalytics-jobs} in a single API request by using a
+comma-separated list of {dfanalytics-jobs} or a wildcard expression. You can
+stop all {dfanalytics-jobs} by using `_all` or by specifying `*` as the
+`<data_frame_analytics_id>`.
+
+[[ml-stop-dfanalytics-path-params]]
+==== {api-path-parms-title}
+
+`<data_frame_analytics_id>` (Required)::
+  (string) Identifier for the {dfanalytics-job}. This identifier can contain
+  lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It
+  must start and end with alphanumeric characters.
+
+`timeout` (Optional)::
+  (time) Controls the amount of time to wait until the {dfanalytics-job} stops.
+  The default value is 20 seconds.
+
+`force` (Optional)::
+  (boolean) If `true`, the {dfanalytics-job} is stopped forcefully.
+
+`allow_no_match` (Optional)::
+  (boolean) If `false` and the `data_frame_analytics_id` does not match any
+  {dfanalytics-job}, an error is returned. The default value is `true`.
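Putting the optional parameters above together: assuming they are passed as
query string parameters, as with the other {ml} stop APIs, a forceful stop
with a longer timeout would look like this sketch:

[source,js]
--------------------------------------------------
POST _ml/data_frame/analytics/loganalytics/_stop?force=true&timeout=30s
--------------------------------------------------
// CONSOLE
// TEST[skip:TBD]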
+
+[[ml-stop-dfanalytics-example]]
+==== {api-examples-title}
+
+The following example stops the `loganalytics` {dfanalytics-job}:
+
+[source,js]
+--------------------------------------------------
+POST _ml/data_frame/analytics/loganalytics/_stop
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:TBD]
+
+When the {dfanalytics-job} stops, you receive the following results:
+
+[source,js]
+----
+{
+  "stopped" : true
+}
+----
+// TESTRESPONSE
\ No newline at end of file
diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc
index 52fe0ba42064e..c6bfb16c797b3 100644
--- a/docs/reference/modules/remote-clusters.asciidoc
+++ b/docs/reference/modules/remote-clusters.asciidoc
@@ -75,20 +75,22 @@ nodes. For example:
     cluster:
         remote:
             cluster_one: <1>
-                seeds: 127.0.0.1:9300
-                transport.ping_schedule: 30s <2>
+                seeds: 127.0.0.1:9300 <2>
+                transport.ping_schedule: 30s <3>
             cluster_two:
                 seeds: 127.0.0.1:9301
-                transport.compress: true <3>
-                skip_unavailable: true <4>
+                transport.compress: true <4>
+                skip_unavailable: true <5>
 --------------------------------
 <1> `cluster_one` and `cluster_two` are arbitrary _cluster aliases_
 representing the connection to each cluster. These names are subsequently used
 to distinguish between local and remote indices.
-<2> A keep-alive ping is configured for `cluster_one`.
-<3> Compression is explicitly enabled for requests to `cluster_two`.
-<4> Disconnected remote clusters are optional for `cluster_two`.
+<2> The hostname and <> port (default: 9300) of a
+seed node in the remote cluster.
+<3> A keep-alive ping is configured for `cluster_one`.
+<4> Compression is explicitly enabled for requests to `cluster_two`.
+<5> Disconnected remote clusters are optional for `cluster_two`.
 
 For more information about the optional transport settings, see
 <>.
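The `cluster.remote` settings shown in `elasticsearch.yml` above can also be
applied at runtime, since the remote cluster settings are dynamically
updatable. A sketch using the cluster settings API with the same alias and
values:

[source,js]
--------------------------------------------------
PUT _cluster/settings
{
  "persistent": {
    "cluster": {
      "remote": {
        "cluster_one": {
          "seeds": [ "127.0.0.1:9300" ],
          "transport.ping_schedule": "30s"
        }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE
// TEST[skip:TBD]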
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java index ba85849598060..1a78b2d7c65d7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java @@ -58,7 +58,7 @@ public class SnapshotIndexStatus implements Iterable, stats = new SnapshotStats(); for (SnapshotIndexShardStatus shard : shards) { indexShards.put(shard.getShardId().getId(), shard); - stats.add(shard.getStats()); + stats.add(shard.getStats(), true); } shardsStats = new SnapshotShardsStats(shards); this.indexShards = unmodifiableMap(indexShards); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java index ba4028620aabd..512df16068c24 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java @@ -296,7 +296,12 @@ public static SnapshotStats fromXContent(XContentParser parser) throws IOExcepti processedSize); } - void add(SnapshotStats stats) { + /** + * Add stats instance to the total + * @param stats Stats instance to add + * @param updateTimestamps Whether or not start time and duration should be updated + */ + void add(SnapshotStats stats, boolean updateTimestamps) { incrementalFileCount += stats.incrementalFileCount; totalFileCount += stats.totalFileCount; processedFileCount += stats.processedFileCount; @@ -309,7 +314,7 @@ void add(SnapshotStats stats) { // First time here startTime = stats.startTime; time = stats.time; - } else { + } else if (updateTimestamps) { // The time the last snapshot ends long endTime = Math.max(startTime + time, stats.startTime + stats.time); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index b850330a9affa..613887274b3ca 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -19,11 +19,12 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; +import org.elasticsearch.Version; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.State; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -71,14 +72,14 @@ public class SnapshotStatus implements ToXContentObject, Streamable { @Nullable private Boolean includeGlobalState; - SnapshotStatus(final Snapshot snapshot, final State state, final List shards, - final Boolean includeGlobalState) { + SnapshotStatus(Snapshot snapshot, State state, List shards, Boolean includeGlobalState, + long startTime, long time) { this.snapshot = Objects.requireNonNull(snapshot); 
this.state = Objects.requireNonNull(state); this.shards = Objects.requireNonNull(shards); this.includeGlobalState = includeGlobalState; shardsStats = new SnapshotShardsStats(shards); - updateShardStats(); + updateShardStats(startTime, time); } private SnapshotStatus(Snapshot snapshot, State state, List shards, @@ -169,7 +170,16 @@ public void readFrom(StreamInput in) throws IOException { } shards = Collections.unmodifiableList(builder); includeGlobalState = in.readOptionalBoolean(); - updateShardStats(); + final long startTime; + final long time; + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + startTime = in.readLong(); + time = in.readLong(); + } else { + startTime = 0L; + time = 0L; + } + updateShardStats(startTime, time); } @Override @@ -181,6 +191,10 @@ public void writeTo(StreamOutput out) throws IOException { shard.writeTo(out); } out.writeOptionalBoolean(includeGlobalState); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeLong(stats.getStartTime()); + out.writeLong(stats.getTime()); + } } /** @@ -281,11 +295,12 @@ public static SnapshotStatus fromXContent(XContentParser parser) throws IOExcept return PARSER.parse(parser, null); } - private void updateShardStats() { - stats = new SnapshotStats(); + private void updateShardStats(long startTime, long time) { + stats = new SnapshotStats(startTime, time, 0, 0, 0, 0, 0, 0); shardsStats = new SnapshotShardsStats(shards); for (SnapshotIndexShardStatus shard : shards) { - stats.add(shard.getStats()); + // BWC: only update timestamps when we did not get a start time from an old node + stats.add(shard.getStats(), startTime == 0L); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 234bc51b95b25..764cb51034be5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -187,7 +187,8 @@ private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, Li shardStatusBuilder.add(shardStatus); } builder.add(new SnapshotStatus(entry.snapshot(), entry.state(), - Collections.unmodifiableList(shardStatusBuilder), entry.includeGlobalState())); + Collections.unmodifiableList(shardStatusBuilder), entry.includeGlobalState(), entry.startTime(), + Math.max(threadPool.absoluteTimeInMillis() - entry.startTime(), 0L))); } } // Now add snapshots on disk that are not currently running @@ -240,8 +241,10 @@ private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, Li default: throw new IllegalArgumentException("Unknown snapshot state " + snapshotInfo.state()); } + final long startTime = snapshotInfo.startTime(); builder.add(new SnapshotStatus(new Snapshot(repositoryName, snapshotId), state, - Collections.unmodifiableList(shardStatusBuilder), snapshotInfo.includeGlobalState())); + Collections.unmodifiableList(shardStatusBuilder), snapshotInfo.includeGlobalState(), + startTime, snapshotInfo.endTime() - startTime)); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index c6c2e4604a7d3..92a80e808709b 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesTransportRequest; @@ -71,6 +70,13 @@ public class PublicationTransportHandler { private AtomicReference lastSeenClusterState = new AtomicReference<>(); + // the master needs the original non-serialized state as the cluster state contains some volatile information that we + // don't want to be replicated because it's not usable on another node (e.g. UnassignedInfo.unassignedTimeNanos) or + // because it's mostly just debugging info that would unnecessarily blow up CS updates (I think there was one in + // snapshot code). + // TODO: look into these and check how to get rid of them + private AtomicReference currentPublishRequestToSelf = new AtomicReference<>(); + private final AtomicLong fullClusterStateReceivedCount = new AtomicLong(); private final AtomicLong incompatibleClusterStateDiffReceivedCount = new AtomicLong(); private final AtomicLong compatibleClusterStateDiffReceivedCount = new AtomicLong(); @@ -153,32 +159,32 @@ public PublicationContext newPublicationContext(ClusterChangedEvent clusterChang return new PublicationContext() { @Override public void sendPublishRequest(DiscoveryNode destination, PublishRequest publishRequest, - ActionListener responseActionListener) { + ActionListener originalListener) { assert publishRequest.getAcceptedState() == clusterChangedEvent.state() : "state got switched on us"; + final ActionListener responseActionListener; if (destination.equals(nodes.getLocalNode())) { - // the master needs the original non-serialized state as the cluster state contains some volatile information that we - // don't want to be replicated because it's not usable on another node (e.g. UnassignedInfo.unassignedTimeNanos) or - // because it's mostly just debugging info that would unnecessarily blow up CS updates (I think there was one in - // snapshot code). 
- // TODO: look into these and check how to get rid of them - transportService.getThreadPool().generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - // wrap into fake TransportException, as that's what we expect in Publication - responseActionListener.onFailure(new TransportException(e)); - } - + // if publishing to self, use original request instead (see currentPublishRequestToSelf for explanation) + final PublishRequest previousRequest = currentPublishRequestToSelf.getAndSet(publishRequest); + assert previousRequest == null; + responseActionListener = new ActionListener() { @Override - protected void doRun() { - responseActionListener.onResponse(handlePublishRequest.apply(publishRequest)); + public void onResponse(PublishWithJoinResponse publishWithJoinResponse) { + final PublishRequest previousRequest = currentPublishRequestToSelf.getAndSet(null); + assert previousRequest == publishRequest; + originalListener.onResponse(publishWithJoinResponse); } @Override - public String toString() { - return "publish to self of " + publishRequest; + public void onFailure(Exception e) { + final PublishRequest previousRequest = currentPublishRequestToSelf.getAndSet(null); + assert previousRequest == publishRequest; + originalListener.onFailure(e); } - }); - } else if (sendFullVersion || !previousState.nodes().nodeExists(destination)) { + }; + } else { + responseActionListener = originalListener; + } + if (sendFullVersion || !previousState.nodes().nodeExists(destination)) { logger.trace("sending full cluster state version {} to {}", newState.version(), destination); PublicationTransportHandler.this.sendFullClusterState(newState, serializedStates, destination, responseActionListener); } else { @@ -267,10 +273,6 @@ private static void buildDiffAndSerializeStates(ClusterState clusterState, Clust Map serializedDiffs) { Diff diff = null; for (DiscoveryNode node : discoveryNodes) { - if (node.equals(discoveryNodes.getLocalNode())) { - // ignore, see newPublicationContext - continue; - } try { if (sendFullVersion || !previousState.nodes().nodeExists(node)) { if (serializedStates.containsKey(node.getVersion()) == false) { @@ -356,7 +358,7 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque fullClusterStateReceivedCount.incrementAndGet(); logger.debug("received full cluster state version [{}] with size [{}]", incomingState.version(), request.bytes().length()); - final PublishWithJoinResponse response = handlePublishRequest.apply(new PublishRequest(incomingState)); + final PublishWithJoinResponse response = acceptState(incomingState); lastSeenClusterState.set(incomingState); return response; } else { @@ -366,7 +368,7 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque incompatibleClusterStateDiffReceivedCount.incrementAndGet(); throw new IncompatibleClusterStateVersionException("have no local cluster state"); } else { - final ClusterState incomingState; + ClusterState incomingState; try { Diff diff = ClusterState.readDiffFrom(in, lastSeen.nodes().getLocalNode()); incomingState = diff.apply(lastSeen); // might throw IncompatibleClusterStateVersionException @@ -380,7 +382,7 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque compatibleClusterStateDiffReceivedCount.incrementAndGet(); logger.debug("received diff cluster state version [{}] with uuid [{}], diff size [{}]", incomingState.version(), incomingState.stateUUID(), request.bytes().length()); - final PublishWithJoinResponse 
response = handlePublishRequest.apply(new PublishRequest(incomingState)); + final PublishWithJoinResponse response = acceptState(incomingState); lastSeenClusterState.compareAndSet(lastSeen, incomingState); return response; } @@ -389,4 +391,17 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque IOUtils.close(in); } } + + private PublishWithJoinResponse acceptState(ClusterState incomingState) { + // if the state is coming from the current node, use original request instead (see currentPublishRequestToSelf for explanation) + if (transportService.getLocalNode().equals(incomingState.nodes().getMasterNode())) { + final PublishRequest publishRequest = currentPublishRequestToSelf.get(); + if (publishRequest == null || publishRequest.getAcceptedState().stateUUID().equals(incomingState.stateUUID()) == false) { + throw new IllegalStateException("publication to self failed for " + publishRequest); + } else { + return handlePublishRequest.apply(publishRequest); + } + } + return handlePublishRequest.apply(new PublishRequest(incomingState)); + } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index 184b936784c0c..c9fe1d31bd1d5 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -150,7 +150,7 @@ public void writeTo(final StreamOutput out) throws IOException { @Override public String toString() { - return "Request{" + + return "RetentionLeaseBackgroundSyncAction.Request{" + "retentionLeases=" + retentionLeases + ", shardId=" + shardId + ", timeout=" + timeout + diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index 1c4f1b74cbccf..052e72185c501 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -154,7 +154,7 @@ public void writeTo(final StreamOutput out) throws IOException { @Override public String toString() { - return "Request{" + + return "RetentionLeaseSyncAction.Request{" + "retentionLeases=" + retentionLeases + ", shardId=" + shardId + ", timeout=" + timeout + diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 3f0e996849824..9fbb33dda6ddc 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -561,7 +561,7 @@ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId, final Map userMetadata) { SnapshotInfo blobStoreSnapshot = new SnapshotInfo(snapshotId, indices.stream().map(IndexId::getName).collect(Collectors.toList()), - startTime, failure, System.currentTimeMillis(), totalShards, shardFailures, + startTime, failure, threadPool.absoluteTimeInMillis(), totalShards, shardFailures, includeGlobalState, userMetadata); try { final RepositoryData updatedRepositoryData = getRepositoryData().addSnapshot(snapshotId, blobStoreSnapshot.state(), indices); @@ -853,7 +853,7 @@ private void writeAtomic(final String blobName, final 
BytesReference bytesRef, b public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { final ShardId shardId = store.shardId(); - final long startTime = threadPool.relativeTimeInMillis(); + final long startTime = threadPool.absoluteTimeInMillis(); try { logger.debug("[{}] [{}] snapshot to [{}] ...", shardId, snapshotId, metadata.name()); @@ -953,7 +953,7 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s lastSnapshotStatus.getStartTime(), // snapshotStatus.startTime() is assigned on the same machine, // so it's safe to use the relative time in millis - threadPool.relativeTimeInMillis() - lastSnapshotStatus.getStartTime(), + threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), lastSnapshotStatus.getIncrementalFileCount(), lastSnapshotStatus.getIncrementalSize() ); @@ -976,9 +976,9 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s // finalize the snapshot and rewrite the snapshot index with the next sequential snapshot index finalizeShard(newSnapshotsList, fileListGeneration, blobs, "snapshot creation [" + snapshotId + "]", shardContainer, shardId, snapshotId); - snapshotStatus.moveToDone(threadPool.relativeTimeInMillis()); + snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis()); } catch (Exception e) { - snapshotStatus.moveToFailed(threadPool.relativeTimeInMillis(), ExceptionsHelper.detailedMessage(e)); + snapshotStatus.moveToFailed(threadPool.absoluteTimeInMillis(), ExceptionsHelper.detailedMessage(e)); if (e instanceof IndexShardSnapshotFailedException) { throw (IndexShardSnapshotFailedException) e; } else { diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 2783d635c90a4..6812c37f4cd37 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -285,7 +285,7 @@ public ClusterState execute(ClusterState currentState) { request.partial(), State.INIT, snapshotIndices, - System.currentTimeMillis(), + threadPool.absoluteTimeInMillis(), repositoryData.getGenId(), null, request.userMetadata()); @@ -1169,7 +1169,7 @@ public ClusterState execute(ClusterState currentState) { // add the snapshot deletion to the cluster state SnapshotDeletionsInProgress.Entry entry = new SnapshotDeletionsInProgress.Entry( snapshot, - System.currentTimeMillis(), + threadPool.absoluteTimeInMillis(), repositoryStateId ); if (deletionsInProgress != null) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index ca03e59a75858..6b6ce18cfefbb 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -176,7 +176,6 @@ boolean isSkipUnavailable() { @Override public void onNodeDisconnected(DiscoveryNode node) { boolean remove = connectedNodes.remove(node); - logger.trace("node disconnected: {}, removed: {}", node, remove); if (remove && connectedNodes.size() < maxNumRemoteConnections) { // try to reconnect and fill up the slot of the disconnected node connectHandler.forceConnect(); diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java index dbd45640c7b69..41fdb34c0b79c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java @@ -50,7 +50,7 @@ public void testToString() throws Exception { List snapshotIndexShardStatuses = new ArrayList<>(); snapshotIndexShardStatuses.add(snapshotIndexShardStatus); boolean includeGlobalState = randomBoolean(); - SnapshotStatus status = new SnapshotStatus(snapshot, state, snapshotIndexShardStatuses, includeGlobalState); + SnapshotStatus status = new SnapshotStatus(snapshot, state, snapshotIndexShardStatuses, includeGlobalState, 0L, 0L); int initializingShards = 0; int startedShards = 0; @@ -166,7 +166,7 @@ protected SnapshotStatus createTestInstance() { snapshotIndexShardStatuses.add(snapshotIndexShardStatus); } boolean includeGlobalState = randomBoolean(); - return new SnapshotStatus(snapshot, state, snapshotIndexShardStatuses, includeGlobalState); + return new SnapshotStatus(snapshot, state, snapshotIndexShardStatuses, includeGlobalState, 0L, 0L); } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index d7832f04d9589..26d32ce91f908 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -770,23 +770,12 @@ public void testDiffBasedPublishing() { for (ClusterNode cn : cluster.clusterNodes) { assertThat(value(cn.getLastAppliedClusterState()), is(finalValue)); - if (cn == leader) { - // leader does not update publish stats as it's not using the serialized state - assertEquals(cn.toString(), prePublishStats.get(cn).getFullClusterStateReceivedCount(), - postPublishStats.get(cn).getFullClusterStateReceivedCount()); - assertEquals(cn.toString(), prePublishStats.get(cn).getCompatibleClusterStateDiffReceivedCount(), - postPublishStats.get(cn).getCompatibleClusterStateDiffReceivedCount()); - assertEquals(cn.toString(), prePublishStats.get(cn).getIncompatibleClusterStateDiffReceivedCount(), - postPublishStats.get(cn).getIncompatibleClusterStateDiffReceivedCount()); - } else { - // followers receive a diff - assertEquals(cn.toString(), prePublishStats.get(cn).getFullClusterStateReceivedCount(), - postPublishStats.get(cn).getFullClusterStateReceivedCount()); - assertEquals(cn.toString(), prePublishStats.get(cn).getCompatibleClusterStateDiffReceivedCount() + 1, - postPublishStats.get(cn).getCompatibleClusterStateDiffReceivedCount()); - assertEquals(cn.toString(), prePublishStats.get(cn).getIncompatibleClusterStateDiffReceivedCount(), - postPublishStats.get(cn).getIncompatibleClusterStateDiffReceivedCount()); - } + assertEquals(cn.toString(), prePublishStats.get(cn).getFullClusterStateReceivedCount(), + postPublishStats.get(cn).getFullClusterStateReceivedCount()); + assertEquals(cn.toString(), prePublishStats.get(cn).getCompatibleClusterStateDiffReceivedCount() + 1, + postPublishStats.get(cn).getCompatibleClusterStateDiffReceivedCount()); + assertEquals(cn.toString(), prePublishStats.get(cn).getIncompatibleClusterStateDiffReceivedCount(), + 
postPublishStats.get(cn).getIncompatibleClusterStateDiffReceivedCount()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java index 61bf1fdcc7896..e44805744aa5c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -49,6 +48,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -150,21 +150,6 @@ public EnumSet context() { } public void testDiscoveryStats() throws Exception { - String expectedStatsJsonResponse = "{\n" + - " \"discovery\" : {\n" + - " \"cluster_state_queue\" : {\n" + - " \"total\" : 0,\n" + - " \"pending\" : 0,\n" + - " \"committed\" : 0\n" + - " },\n" + - " \"published_cluster_states\" : {\n" + - " \"full_states\" : 0,\n" + - " \"incompatible_diffs\" : 0,\n" + - " \"compatible_diffs\" : 0\n" + - " }\n" + - " }\n" + - "}"; - internalCluster().startNode(); ensureGreen(); // ensures that all events are processed (in particular state recovery fully completed) assertBusy(() -> @@ -182,15 +167,13 @@ public void testDiscoveryStats() throws Exception { assertThat(stats.getQueueStats().getPending(), equalTo(0)); assertThat(stats.getPublishStats(), notNullValue()); - assertThat(stats.getPublishStats().getFullClusterStateReceivedCount(), equalTo(0L)); + assertThat(stats.getPublishStats().getFullClusterStateReceivedCount(), greaterThanOrEqualTo(0L)); assertThat(stats.getPublishStats().getIncompatibleClusterStateDiffReceivedCount(), equalTo(0L)); - assertThat(stats.getPublishStats().getCompatibleClusterStateDiffReceivedCount(), equalTo(0L)); + assertThat(stats.getPublishStats().getCompatibleClusterStateDiffReceivedCount(), greaterThanOrEqualTo(0L)); XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); builder.startObject(); stats.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - - assertThat(Strings.toString(builder), equalTo(expectedStatsJsonResponse)); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 4d475fc7b0b1b..5cd03335dd964 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1340,6 +1340,7 @@ public void testGetSnapshotsNoRepos() { assertTrue(getSnapshotsResponse.getSuccessfulResponses().isEmpty()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/43993") public void testGetSnapshotsMultipleRepos() { final Client client = client(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java 
b/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java new file mode 100644 index 0000000000000..15f4f72538ff7 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.snapshots; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; + +import java.util.List; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase { + + public void testStatusApiConsistency() { + Client client = client(); + + logger.info("--> creating repository"); + assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings( + Settings.builder().put("location", randomRepoPath()).build())); + + createIndex("test-idx-1", "test-idx-2", "test-idx-3"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx-1", "_doc", Integer.toString(i), "foo", "bar" + i); + index("test-idx-2", "_doc", Integer.toString(i), "foo", "baz" + i); + index("test-idx-3", "_doc", Integer.toString(i), "foo", "baz" + i); + } + refresh(); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true).get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + List snapshotInfos = + client.admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); + assertThat(snapshotInfos.size(), equalTo(1)); + SnapshotInfo snapshotInfo = snapshotInfos.get(0); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); + + final List snapshotStatus = client.admin().cluster().snapshotsStatus( + new SnapshotsStatusRequest("test-repo", new String[]{"test-snap"})).actionGet().getSnapshots(); + assertThat(snapshotStatus.size(), equalTo(1)); + final SnapshotStatus snStatus = snapshotStatus.get(0); + assertEquals(snStatus.getStats().getStartTime(), snapshotInfo.startTime()); + 
assertEquals(snStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime()); + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 86c5e569f1eb5..b68fd50662ea2 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -34,7 +34,6 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -547,8 +546,7 @@ private ActionListener connectionListener(final CountDownLatch latch) { return ActionListener.wrap(x -> latch.countDown(), x -> fail()); } - @TestLogging("org.elasticsearch.transport:TRACE") // added for https://github.com/elastic/elasticsearch/issues/41067 - public void testCollectNodes() throws Exception { + public void testCollectNodes() throws InterruptedException, IOException { final Settings settings = Settings.EMPTY; final List knownNodes_c1 = new CopyOnWriteArrayList<>(); final List knownNodes_c2 = new CopyOnWriteArrayList<>(); @@ -667,7 +665,6 @@ public void onFailure(Exception e) { new ActionListener>() { @Override public void onResponse(BiFunction stringStringDiscoveryNodeBiFunction) { - logger.warn("unexpected call", new Exception("just for the stack trace")); try { fail("should not be called"); } finally { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 66a27d10780d4..9c3d79a04b531 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -200,7 +200,15 @@ public static void resetPortCounter() { portGenerator.set(0); } + // Allows distinguishing between parallel test processes + public static final int TEST_WORKER_VM; + + protected static final String TEST_WORKER_SYS_PROPERTY = "org.gradle.test.worker"; + static { + // org.gradle.test.worker starts counting at 1, but we want to start counting at 0 here + // in case system property is not defined (e.g. 
when running test from IDE), just use 0 + TEST_WORKER_VM = RandomizedTest.systemPropertyAsInt(TEST_WORKER_SYS_PROPERTY, 1) - 1; setTestSysProps(); LogConfigurator.loadLog4jPlugins(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index ef894c3cfd3ed..b198e4038e8e5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.SeedUtils; -import com.carrotsearch.randomizedtesting.SysGlobals; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; @@ -506,8 +505,7 @@ private static Settings getRandomNodeSettings(long seed) { public static String clusterName(String prefix, long clusterSeed) { StringBuilder builder = new StringBuilder(prefix); - final int childVM = RandomizedTest.systemPropertyAsInt(SysGlobals.CHILDVM_SYSPROP_JVM_ID, 0); - builder.append("-CHILD_VM=[").append(childVM).append(']'); + builder.append("-TEST_WORKER_VM=[").append(ESTestCase.TEST_WORKER_VM).append(']'); builder.append("-CLUSTER_SEED=[").append(clusterSeed).append(']'); // if multiple maven task run on a single host we better have an identifier that doesn't rely on input params builder.append("-HASH=[").append(SeedUtils.formatSeed(System.nanoTime())).append(']'); diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 7cd706b3564ce..03cf33f73a52d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -19,7 +19,6 @@ package org.elasticsearch.test.transport; -import com.carrotsearch.randomizedtesting.SysGlobals; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; @@ -46,6 +45,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; @@ -92,7 +92,6 @@ public final class MockTransportService extends TransportService { private static final Logger logger = LogManager.getLogger(MockTransportService.class); private final Map> openConnections = new HashMap<>(); - private static final int JVM_ORDINAL = Integer.parseInt(System.getProperty(SysGlobals.CHILDVM_SYSPROP_JVM_ID, "0")); public static class TestPlugin extends Plugin { @Override @@ -112,7 +111,8 @@ public static MockNioTransport newMockTransport(Settings settings, Version versi // concurrent tests could claim port that another JVM just released and if that test tries to simulate a disconnect it might // be smart enough to re-connect depending on what is tested. 
To reduce the risk, since this is very hard to debug we use // a different default port range per JVM unless the incoming settings override it - int basePort = 10300 + (JVM_ORDINAL * 100); // use a non-default port otherwise some cluster in this JVM might reuse a port + // use a non-default base port otherwise some cluster in this JVM might reuse a port + int basePort = 10300 + (ESTestCase.TEST_WORKER_VM * 100); settings = Settings.builder().put(TransportSettings.PORT.getKey(), basePort + "-" + (basePort + 100)).put(settings).build(); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); return new MockNioTransport(settings, version, threadPool, new NetworkService(Collections.emptyList()), diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java index 0705eb32fc294..af8df361d4cf3 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.test.test; +import com.carrotsearch.randomizedtesting.RandomizedTest; import junit.framework.AssertionFailedError; import org.elasticsearch.common.bytes.BytesReference; @@ -181,4 +182,11 @@ public void testRandomValueOtherThan() { Supplier usuallyNull = () -> usually() ? null : randomInt(); assertNotNull(randomValueOtherThan(null, usuallyNull)); } + + public void testWorkerSystemProperty() { + assumeTrue("requires running tests with Gradle", System.getProperty("tests.gradle") != null); + // org.gradle.test.worker starts counting at 1 + assertThat(RandomizedTest.systemPropertyAsInt(TEST_WORKER_SYS_PROPERTY, -1), greaterThan(0)); + assertEquals(RandomizedTest.systemPropertyAsInt(TEST_WORKER_SYS_PROPERTY, -1) - 1, TEST_WORKER_VM); + } } diff --git a/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePluginTests.java b/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePluginTests.java index c55a405059a70..ce3c60b5ffe90 100644 --- a/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePluginTests.java +++ b/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePluginTests.java @@ -80,6 +80,17 @@ public void testBootstrapOnlyVotingOnlyNodes() throws Exception { equalTo(false)); } + public void testBootstrapOnlySingleVotingOnlyNode() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(Settings.builder().put(VotingOnlyNodePlugin.VOTING_ONLY_NODE_SETTING.getKey(), true) + .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build()); + internalCluster().startNode(); + assertBusy(() -> assertThat(client().admin().cluster().prepareState().get().getState().getNodes().getSize(), equalTo(2))); + assertThat( + VotingOnlyNodePlugin.isVotingOnlyNode(client().admin().cluster().prepareState().get().getState().nodes().getMasterNode()), + equalTo(false)); + } + public void testVotingOnlyNodesCannotBeMasterWithoutFullMasterNodes() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().startNode();