From 5a2ba34174c66bdc484fa907398448ca98f28140 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 27 Mar 2019 23:08:10 +0000 Subject: [PATCH] Get node ID from nodes info in REST tests (#40052) (#40532) We discussed recently that the cluster state API should be considered "internal" and therefore our usual cast-iron stability guarantees do not hold for this API. However, there are a good number of REST tests that try to identify the master node. Today they call `GET /_cluster/state` API and extract the master node ID from the response. In fact many of these tests just want an arbitrary node ID (or perhaps a data node ID) so an alternative is to call `GET _nodes` or `GET _nodes/data:true` and obtain a node ID from the keys of the `nodes` map in the response. This change adds the ability for YAML-based REST tests to extract an arbitrary key from a map so that they can obtain a node ID from the nodes info API instead of using the master node ID from the cluster state API. Relates #40047. --- .../cluster.allocation_explain/10_basic.yml | 8 - .../test/cluster.reroute/11_explain.yml | 10 +- .../test/indices.shrink/10_basic.yml | 18 +- .../test/indices.shrink/20_source_mapping.yml | 12 +- .../test/indices.shrink/30_copy_settings.yml | 18 +- .../test/indices.split/30_copy_settings.yml | 18 +- .../test/nodes.info/10_basic.yml | 13 +- .../test/nodes.info/20_transport.yml | 10 +- .../test/nodes.info/30_settings.yml | 13 +- .../test/nodes.stats/10_basic.yml | 11 +- .../test/nodes.stats/11_indices_metrics.yml | 308 +++++++++--------- .../nodes.stats/20_response_filtering.yml | 198 +++++------ .../test/nodes.stats/30_discovery.yml | 12 +- .../rest-api-spec/test/tasks.get/10_basic.yml | 3 - .../test/tasks.list/10_basic.yml | 12 +- .../test/rest/yaml/Features.java | 3 +- .../test/rest/yaml/ObjectPath.java | 14 +- .../test/rest/yaml/ObjectPathTests.java | 51 +++ 18 files changed, 406 insertions(+), 326 deletions(-) diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml index b8c922c98c15b..732a53aeea4f8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml @@ -12,10 +12,6 @@ - match: { acknowledged: true } - - do: - cluster.state: - metric: [ master_node ] - - do: cluster.allocation_explain: body: { "index": "test", "shard": 0, "primary": true } @@ -37,10 +33,6 @@ index: test body: { "settings": { "index.number_of_shards": 1, "index.number_of_replicas": 9 } } - - do: - cluster.state: - metric: [ master_node ] - - do: cluster.allocation_explain: include_disk_info: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml index 5419acb9321f4..248b47d07a71e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml @@ -25,12 +25,14 @@ setup: --- "Explain API for non-existent node & shard": + - skip: + features: [arbitrary_key] - do: - cluster.state: - metric: [ master_node ] - - - set: {master_node: node_id} + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: cluster.reroute: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index a6d6bb0730548..41c851b71cc6c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -3,18 +3,20 @@ - skip: version: " - 6.9.99" reason: expects 
warnings that pre-7.0.0 will not send - features: "warnings" - # creates an index with one document solely allocated on the master node + features: [warnings, arbitrary_key] + + # creates an index with one document solely allocated on a particular data node # and shrinks it into a new index with a single shard # we don't do the relocation to a single node after the index is created # here since in a mixed version cluster we can't identify # which node is the one with the highest version and that is the only one that can safely # be used to shrink the index. - - do: - cluster.state: {} - # Get master node id - - set: { master_node: master } + - do: + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: indices.create: @@ -22,8 +24,8 @@ wait_for_active_shards: 1 body: settings: - # ensure everything is allocated on a single node - index.routing.allocation.include._id: $master + # ensure everything is allocated on the same data node + index.routing.allocation.include._id: $node_id index.number_of_shards: 2 index.number_of_replicas: 0 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index f12864236d7bd..dec0760fc6b19 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -3,13 +3,13 @@ - skip: version: " - 6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" + features: [warnings, arbitrary_key] - do: - cluster.state: {} - # Get master node id - - - set: { master_node: master } + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id # create index - do: @@ -19,7 +19,7 @@ body: settings: # ensure everything is allocated on a single node - index.routing.allocation.include._id: $master + 
index.routing.allocation.include._id: $node_id index.number_of_shards: 2 index.number_of_replicas: 0 mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 8c4c84c4be152..7fc73ef8fd017 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -3,13 +3,13 @@ - skip: version: " - 6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" + features: [warnings, arbitrary_key] - do: - cluster.state: {} - - # get master node id - - set: { master_node: master } + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: indices.create: @@ -17,8 +17,8 @@ wait_for_active_shards: 1 body: settings: - # ensure everything is allocated on the master node - index.routing.allocation.include._id: $master + # ensure everything is allocated on the same node + index.routing.allocation.include._id: $node_id index.number_of_shards: 2 index.number_of_replicas: 0 index.merge.scheduler.max_merge_count: 4 @@ -63,7 +63,7 @@ - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - match: { copy-settings-target.settings.index.blocks.write: "true" } - - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } + - match: { copy-settings-target.settings.index.routing.allocation.include._id: $node_id } # now we do a actual shrink and copy settings (by default) - do: @@ -89,7 +89,7 @@ - match: { default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } - match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - match: { 
default-copy-settings-target.settings.index.blocks.write: "true" } - - match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $master } + - match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $node_id } # now we do a actual shrink and try to set no copy settings - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index 90d4080e46379..5893ccbc84ede 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -3,13 +3,13 @@ - skip: version: " - 6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" + features: [arbitrary_key, warnings] - do: - cluster.state: {} - - # get master node id - - set: { master_node: master } + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: indices.create: @@ -17,8 +17,8 @@ wait_for_active_shards: 1 body: settings: - # ensure everything is allocated on the master node - index.routing.allocation.include._id: $master + # ensure everything is allocated on the same node + index.routing.allocation.include._id: $node_id index.number_of_replicas: 0 index.number_of_shards: 1 index.number_of_routing_shards: 4 @@ -66,7 +66,7 @@ - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - match: { copy-settings-target.settings.index.blocks.write: "true" } - - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } + - match: { copy-settings-target.settings.index.routing.allocation.include._id: $node_id } # now we do a actual shrink and copy settings (by default) - do: @@ -93,7 +93,7 @@ - match: { 
default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } - match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - match: { default-copy-settings-target.settings.index.blocks.write: "true" } - - match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $master } + - match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $node_id } - do: catch: /illegal_argument_exception/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml index 47f6c3e21141a..5821117f4c005 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml @@ -1,14 +1,13 @@ +setup: + - skip: + features: [arbitrary_key] --- "node_info test": - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - do: nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - is_true: nodes - is_true: cluster_name - - is_true: nodes.$master.roles + - is_true: nodes.$node_id.roles diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml index efd2260356a2d..09102157bcb99 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml @@ -2,15 +2,15 @@ "node_info test profile is empty": - skip: - features: stash_in_path + features: [stash_in_path, arbitrary_key] - do: - cluster.state: {} - - - set: {master_node: master} + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.info: metric: [ transport ] - - is_true: nodes.$master.transport.profiles + - is_true: nodes.$node_id.transport.profiles diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml index a63c246b6033e..99b8b6f361a47 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml @@ -1,19 +1,22 @@ --- "node_info test flat_settings": - - do: - cluster.state: {} + - skip: + features: [arbitrary_key] - - set: { master_node: master } + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.info: metric: [ settings ] - - match : { nodes.$master.settings.client.type: node } + - match : { nodes.$node_id.settings.client.type: node } - do: nodes.info: metric: [ settings ] flat_settings: true - - match : { nodes.$master.settings.client\.type: node } + - match : { nodes.$node_id.settings.client\.type: node } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml index 61614e7f8e1b7..099483be9aded 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml @@ -9,17 +9,20 @@ --- "Nodes stats level": - - do: - cluster.state: {} + - skip: + features: [arbitrary_key] - - set: { master_node: master } + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: metric: [ indices ] level: "indices" - - is_true: nodes.$master.indices.indices + - is_true: nodes.$node_id.indices.indices --- "Nodes stats unrecognized parameter": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 998909dd9cf1b..a09619b7255c3 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -1,211 +1,227 @@ --- "Metric - blank": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: {} - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - _all": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: _all } - - is_true: nodes.$master.indices.docs - - is_true: 
nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - indices _all": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: _all } - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: 
nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - one": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: docs } - - is_true: nodes.$master.indices.docs - - is_false: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_false: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_false: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_false: nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: 
nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery --- "Metric - multi": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: [ store, get, merge ] } - - is_false: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_false: nodes.$master.indices.recovery + - is_false: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery --- "Metric - 
recovery": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: [ recovery ] } - - is_false: nodes.$master.indices.docs - - is_false: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_false: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_false: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery + - is_false: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - _all include_segment_file_sizes": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: _all, include_segment_file_sizes: true } - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - 
is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery - - is_true: nodes.$master.indices.segments.file_sizes + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery + - is_true: nodes.$node_id.indices.segments.file_sizes --- "Metric - segments include_segment_file_sizes": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: segments, include_segment_file_sizes: true } - - is_false: nodes.$master.indices.docs - - is_false: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_false: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_false: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: 
nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_false: nodes.$master.indices.recovery - - is_true: nodes.$master.indices.segments.file_sizes + - is_false: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery + - is_true: nodes.$node_id.indices.segments.file_sizes diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml index 432e5d8c207ec..a478fd7d3f235 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml @@ -1,10 +1,11 @@ --- "Nodes Stats with response filtering": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id # Nodes Stats with no filtering - do: @@ -12,18 +13,18 @@ - is_true: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: 
nodes.$master.indices.segments - - gte: { nodes.$master.indices.segments.count: 0 } - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - gte: { nodes.$master.jvm.buffer_pools.direct.used_in_bytes: 0 } + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - gte: { nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes: 0 } # Nodes Stats with only "cluster_name" field - do: @@ -32,9 +33,9 @@ - is_true: cluster_name - is_false: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm # Nodes Stats with "nodes" field and sub-fields - do: @@ -43,18 +44,18 @@ - is_false: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - - gte: { nodes.$master.indices.segments.count: 0 } - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - gte: { nodes.$master.jvm.buffer_pools.direct.used_in_bytes: 0 } + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - 
is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - gte: { nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes: 0 } # Nodes Stats with "nodes.*.indices" field and sub-fields - do: @@ -63,13 +64,13 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - - gte: { nodes.$master.indices.segments.count: 0 } - - is_false: nodes.$master.jvm + - is_false: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_false: nodes.$node_id.jvm # Nodes Stats with "nodes.*.name" and "nodes.*.indices.docs.count" fields - do: @@ -78,12 +79,12 @@ - is_false: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.jvm + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.jvm # Nodes Stats with all "count" fields - do: @@ -92,18 +93,18 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: 
nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - - gte: { nodes.$master.indices.segments.count: 0 } - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - is_false: nodes.$master.jvm.buffer_pools.direct.used_in_bytes + - is_false: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - is_false: nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes # Nodes Stats with all "count" fields in sub-fields of "jvm" field - do: @@ -112,16 +113,16 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.indices.docs.count - - is_false: nodes.$master.indices.segments.count - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - is_false: nodes.$master.jvm.buffer_pools.direct.used_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.indices.docs.count + - is_false: nodes.$node_id.indices.segments.count + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { 
nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - is_false: nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes # Nodes Stats with "nodes.*.fs.data" fields - do: @@ -130,13 +131,13 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm - - is_true: nodes.$master.fs.data - - is_true: nodes.$master.fs.data.0.path - - is_true: nodes.$master.fs.data.0.type - - is_true: nodes.$master.fs.data.0.total_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm + - is_true: nodes.$node_id.fs.data + - is_true: nodes.$node_id.fs.data.0.path + - is_true: nodes.$node_id.fs.data.0.type + - is_true: nodes.$node_id.fs.data.0.total_in_bytes # Nodes Stats with "nodes.*.fs.data.t*" fields - do: @@ -145,21 +146,22 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm - - is_true: nodes.$master.fs.data - - is_false: nodes.$master.fs.data.0.path - - is_true: nodes.$master.fs.data.0.type - - is_true: nodes.$master.fs.data.0.total_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm + - is_true: nodes.$node_id.fs.data + - is_false: nodes.$node_id.fs.data.0.path + - is_true: nodes.$node_id.fs.data.0.type + - is_true: nodes.$node_id.fs.data.0.total_in_bytes --- "Nodes Stats filtered using both includes and excludes filters": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id # Nodes Stats with "nodes" field but no JVM stats - do: @@ -168,10 +170,10 @@ - is_false: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.os - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm + - is_true: nodes.$node_id.name + - is_true: 
nodes.$node_id.os + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm # Nodes Stats with "nodes.*.indices" field and sub-fields but no indices segments - do: @@ -180,10 +182,10 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - is_false: nodes.$master.indices.segments + - is_false: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.segments # Nodes Stats with "nodes.*.fs.data.t*" fields but no "type" field - do: @@ -192,9 +194,9 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm - - is_true: nodes.$master.fs.data - - is_false: nodes.$master.fs.data.0.type - - is_true: nodes.$master.fs.data.0.total_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm + - is_true: nodes.$node_id.fs.data + - is_false: nodes.$node_id.fs.data.0.type + - is_true: nodes.$node_id.fs.data.0.total_in_bytes diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml index ad8058876ae49..a6b7f29a183c8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml @@ -1,13 +1,13 @@ --- "Discovery stats": - skip: - version: " - 6.0.99" - reason: "published_cluster_states_received arrived in 6.1.0" - - do: - cluster.state: {} + features: [arbitrary_key] - # Get master node id - - set: { master_node: master } + - do: + nodes.info: + node_id: _master + - set: + nodes._arbitrary_key_: master - do: nodes.stats: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml index caf97b302f132..addeb3226c575 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml @@ -1,9 +1,6 @@ --- "get task test": # Note that this gets much better testing in reindex's tests because it actually saves the task - - do: - cluster.state: {} - - do: catch: missing tasks.get: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml index 4fdfc378bee26..1742134af2b75 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml @@ -1,16 +1,18 @@ --- "tasks_list test": - - do: - cluster.state: {} + - skip: + features: [arbitrary_key] - # Get master node id - - set: { master_node: master } + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: tasks.list: {} - is_true: nodes - - is_true: nodes.$master.roles + - is_true: nodes.$node_id.roles - do: tasks.list: diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index fea1c3997530c..bdcf426d118f3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -47,7 +47,8 @@ public final class Features { "warnings", "yaml", "contains", - "transform_and_set" + "transform_and_set", + "arbitrary_key" )); private Features() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java index 8ebeca4233abd..36d1ff04a5596 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java @@ -102,7 +102,17 @@ private Object evaluate(String key, Object object, Stash stash) throws IOExcepti } if (object instanceof Map) { - return ((Map) object).get(key); + final Map objectAsMap = (Map) object; + if ("_arbitrary_key_".equals(key)) { + if (objectAsMap.isEmpty()) { + throw new IllegalArgumentException("requested [" + key + "] but the map was empty"); + } + if (objectAsMap.containsKey(key)) { + throw new IllegalArgumentException("requested meta-key [" + key + "] but the map unexpectedly contains this key"); + } + return objectAsMap.keySet().iterator().next(); + } + return objectAsMap.get(key); } if (object instanceof List) { List list = (List) object; @@ -149,7 +159,7 @@ private String[] parsePath(String path) { list.add(current.toString()); } - return list.toArray(new String[list.size()]); + return list.toArray(new String[0]); } /** diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java index 79d6d42092a85..9345d73733076 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java @@ -34,6 +34,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.isOneOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -181,6 +182,56 @@ public void testEvaluateObjectKeys() throws Exception { assertThat(strings, contains("template_1", "template_2")); } + public void testEvaluateArbitraryKey() throws Exception { + XContentBuilder xContentBuilder = randomXContentBuilder(); + 
xContentBuilder.startObject(); + xContentBuilder.startObject("metadata"); + xContentBuilder.startObject("templates"); + xContentBuilder.startObject("template_1"); + xContentBuilder.field("field1", "value"); + xContentBuilder.endObject(); + xContentBuilder.startObject("template_2"); + xContentBuilder.field("field2", "value"); + xContentBuilder.field("field3", "value"); + xContentBuilder.endObject(); + xContentBuilder.startObject("template_3"); + xContentBuilder.endObject(); + xContentBuilder.startObject("template_4"); + xContentBuilder.field("_arbitrary_key_", "value"); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + ObjectPath objectPath = ObjectPath.createFromXContent(xContentBuilder.contentType().xContent(), + BytesReference.bytes(xContentBuilder)); + + { + final Object object = objectPath.evaluate("metadata.templates.template_1._arbitrary_key_"); + assertThat(object, instanceOf(String.class)); + final String key = (String) object; + assertThat(key, equalTo("field1")); + } + + { + final Object object = objectPath.evaluate("metadata.templates.template_2._arbitrary_key_"); + assertThat(object, instanceOf(String.class)); + final String key = (String) object; + assertThat(key, isOneOf("field2", "field3")); + } + + { + final IllegalArgumentException exception + = expectThrows(IllegalArgumentException.class, () -> objectPath.evaluate("metadata.templates.template_3._arbitrary_key_")); + assertThat(exception.getMessage(), equalTo("requested [_arbitrary_key_] but the map was empty")); + } + + { + final IllegalArgumentException exception + = expectThrows(IllegalArgumentException.class, () -> objectPath.evaluate("metadata.templates.template_4._arbitrary_key_")); + assertThat(exception.getMessage(), equalTo("requested meta-key [_arbitrary_key_] but the map unexpectedly contains this key")); + } + } + public void testEvaluateStashInPropertyName() throws Exception { XContentBuilder xContentBuilder = 
randomXContentBuilder(); xContentBuilder.startObject();