diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 5fcb619af6570..c91d1a702b7ec 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -315,7 +315,6 @@
/src/plugins/es_ui_shared/ @elastic/kibana-stack-management
/x-pack/plugins/cross_cluster_replication/ @elastic/kibana-stack-management
/x-pack/plugins/index_lifecycle_management/ @elastic/kibana-stack-management
-/x-pack/plugins/console_extensions/ @elastic/kibana-stack-management
/x-pack/plugins/grokdebugger/ @elastic/kibana-stack-management
/x-pack/plugins/index_management/ @elastic/kibana-stack-management
/x-pack/plugins/license_api_guard/ @elastic/kibana-stack-management
@@ -330,7 +329,6 @@
/x-pack/plugins/ingest_pipelines/ @elastic/kibana-stack-management
/packages/kbn-ace/ @elastic/kibana-stack-management
/packages/kbn-monaco/ @elastic/kibana-stack-management
-#CC# /x-pack/plugins/console_extensions/ @elastic/kibana-stack-management
#CC# /x-pack/plugins/cross_cluster_replication/ @elastic/kibana-stack-management
# Security Solution
diff --git a/.github/workflows/project-assigner.yml b/.github/workflows/project-assigner.yml
index 4966a0b506317..f2359846504bf 100644
--- a/.github/workflows/project-assigner.yml
+++ b/.github/workflows/project-assigner.yml
@@ -8,8 +8,17 @@ jobs:
name: Assign issue or PR to project based on label
steps:
- name: Assign to project
- uses: elastic/github-actions/project-assigner@v2.0.0
+ uses: elastic/github-actions/project-assigner@v2.1.0
id: project_assigner
with:
- issue-mappings: '[{"label": "Feature:Lens", "projectNumber": 32, "columnName": "Long-term goals"}, {"label": "Feature:Canvas", "projectNumber": 38, "columnName": "Inbox"}, {"label": "Feature:Dashboard", "projectNumber": 68, "columnName": "Inbox"}, {"label": "Feature:Drilldowns", "projectNumber": 68, "columnName": "Inbox"}, {"label": "Feature:Input Controls", "projectNumber": 72, "columnName": "Inbox"}]'
+ issue-mappings: |
+ [
+ {"label": "Feature:Lens", "projectNumber": 32, "columnName": "Long-term goals"},
+ {"label": "Feature:Discover", "projectNumber": 44, "columnName": "Inbox"},
+ {"label": "Feature:Canvas", "projectNumber": 38, "columnName": "Inbox"},
+ {"label": "Feature:Dashboard", "projectNumber": 68, "columnName": "Inbox"},
+ {"label": "Feature:Drilldowns", "projectNumber": 68, "columnName": "Inbox"},
+ {"label": "Feature:Input Controls", "projectNumber": 72, "columnName": "Inbox"},
+ {"label": "Team:Security", "projectNumber": 320, "columnName": "Awaiting triage", "projectScope": "org"}
+ ]
ghToken: ${{ secrets.PROJECT_ASSIGNER_TOKEN }}
diff --git a/config/kibana.yml b/config/kibana.yml
index eefb6bb8bacda..dea9849f17b28 100644
--- a/config/kibana.yml
+++ b/config/kibana.yml
@@ -42,6 +42,10 @@
#elasticsearch.username: "kibana_system"
#elasticsearch.password: "pass"
+# Kibana can also authenticate to Elasticsearch via "service account tokens".
+# You may use this token instead of a username/password.
+# elasticsearch.serviceAccountToken: "my_token"
+
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
diff --git a/docs/apm/agent-configuration.asciidoc b/docs/apm/agent-configuration.asciidoc
index 2574d254ac14c..f2e07412c4a38 100644
--- a/docs/apm/agent-configuration.asciidoc
+++ b/docs/apm/agent-configuration.asciidoc
@@ -43,6 +43,7 @@ Supported configurations are also tagged with the image:./images/dynamic-config.
[horizontal]
Go Agent:: {apm-go-ref}/configuration.html[Configuration reference]
+iOS agent:: _Not yet supported_
Java Agent:: {apm-java-ref}/configuration.html[Configuration reference]
.NET Agent:: {apm-dotnet-ref}/configuration.html[Configuration reference]
Node.js Agent:: {apm-node-ref}/configuration.html[Configuration reference]
diff --git a/docs/apm/apm-alerts.asciidoc b/docs/apm/apm-alerts.asciidoc
index 3e3e2b178ff10..42016ac08bfc7 100644
--- a/docs/apm/apm-alerts.asciidoc
+++ b/docs/apm/apm-alerts.asciidoc
@@ -1,69 +1,57 @@
[role="xpack"]
[[apm-alerts]]
-=== Alerts
+=== Alerts and rules
++++
Create an alert
++++
+The APM app allows you to define **rules** to detect complex conditions within your APM data
+and trigger built-in **actions** when those conditions are met.
-The APM app integrates with Kibana's {kibana-ref}/alerting-getting-started.html[alerting and actions] feature.
-It provides a set of built-in **actions** and APM specific threshold **alerts** for you to use
-and enables central management of all alerts from <>.
+The following **rules** are supported:
+
+* Latency anomaly rule:
+Alert when latency of a service is abnormal
+* Transaction error rate threshold rule:
+Alert when the service's transaction error rate is above the defined threshold
+* Error count threshold rule:
+Alert when the number of errors in a service exceeds a defined threshold
[role="screenshot"]
image::apm/images/apm-alert.png[Create an alert in the APM app]
-For a walkthrough of the alert flyout panel, including detailed information on each configurable property,
-see Kibana's <>.
-
-The APM app supports four different types of alerts:
-
-* Transaction duration anomaly:
-alerts when the service's transaction duration reaches a certain anomaly score
-* Transaction duration threshold:
-alerts when the service's transaction duration exceeds a given time limit over a given time frame
-* Transaction error rate threshold:
-alerts when the service's transaction error rate is above the selected rate over a given time frame
-* Error count threshold:
-alerts when service exceeds a selected number of errors over a given time frame
+For a complete walkthrough of the **Create rule** flyout panel, including detailed information on each configurable property,
+see Kibana's <>.
-Below, we'll walk through the creation of two of these alerts.
+Below, we'll walk through the creation of two APM rules.
[float]
[[apm-create-transaction-alert]]
-=== Example: create a transaction duration alert
+=== Example: create a latency anomaly rule
-Transaction duration alerts trigger when the duration of a specific transaction type in a service exceeds a defined threshold.
-This guide will create an alert for the `opbeans-java` service based on the following criteria:
+Latency anomaly rules trigger when the latency of a service is abnormal.
+This guide will create an alert for all services based on the following criteria:
-* Environment: Production
-* Transaction type: `transaction.type:request`
-* Average request is above `1500ms` for the last 5 minutes
-* Check every 10 minutes, and repeat the alert every 30 minutes
-* Send the alert via Slack
+* Environment: production
+* Severity level: critical
+* Run every five minutes
+* Send an alert to a Slack channel only when the rule status changes
-From the APM app, navigate to the `opbeans-java` service and select
-**Alerts** > **Create threshold alert** > **Transaction duration**.
+From any page in the APM app, select **Alerts and rules** > **Latency** > **Create anomaly rule**.
+Change the name of the alert, but do not edit the tags.
-`Transaction duration | opbeans-java` is automatically set as the name of the alert,
-and `apm` and `service.name:opbeans-java` are added as tags.
-It's fine to change the name of the alert, but do not edit the tags.
+Based on the criteria above, define the following rule details:
-Based on the alert criteria, define the following alert details:
+* **Check every** - `5 minutes`
+* **Notify** - "Only on status change"
+* **Environment** - `all`
+* **Has anomaly with severity** - `critical`
-* **Check every** - `10 minutes`
-* **Notify every** - `30 minutes`
-* **TYPE** - `request`
-* **WHEN** - `avg`
-* **IS ABOVE** - `1500ms`
-* **FOR THE LAST** - `5 minutes`
-
-Select an action type.
-Multiple action types can be selected, but in this example, we want to post to a Slack channel.
+Next, add a connector. Multiple connectors can be selected, but in this example we're interested in Slack.
Select **Slack** > **Create a connector**.
Enter a name for the connector,
-and paste the webhook URL.
+and paste your Slack webhook URL.
See Slack's webhook documentation if you need to create one.
A default message is provided as a starting point for your alert.
@@ -72,35 +60,32 @@ to pass additional alert values at the time a condition is detected to an action
A list of available variables can be accessed by selecting the
**add variable** button image:apm/images/add-variable.png[add variable button].
-Select **Save**. The alert has been created and is now active!
+Click **Save**. The rule has been created and is now active!
[float]
[[apm-create-error-alert]]
-=== Example: create an error rate alert
+=== Example: create an error count threshold alert
-Error rate alerts trigger when the number of errors in a service exceeds a defined threshold.
-This guide creates an alert for the `opbeans-python` service based on the following criteria:
+The error count threshold alert triggers when the number of errors in a service exceeds a defined threshold.
+This guide will create an alert for all services based on the following criteria:
-* Environment: Production
+* All environments
* Error rate is above 25 for the last minute
-* Check every 1 minute, and repeat the alert every 10 minutes
-* Send the alert via email to the `opbeans-python` team
-
-From the APM app, navigate to the `opbeans-python` service and select
-**Alerts** > **Create threshold alert** > **Error rate**.
+* Check every 1 minute, and alert every time the rule is active
+* Send the alert via email to the site reliability team
-`Error rate | opbeans-python` is automatically set as the name of the alert,
-and `apm` and `service.name:opbeans-python` are added as tags.
-It's fine to change the name of the alert, but do not edit the tags.
+From any page in the APM app, select **Alerts and rules** > **Error count** > **Create threshold rule**.
+Change the name of the alert, but do not edit the tags.
-Based on the alert criteria, define the following alert details:
+Based on the criteria above, define the following rule details:
* **Check every** - `1 minute`
-* **Notify every** - `10 minutes`
-* **IS ABOVE** - `25 errors`
-* **FOR THE LAST** - `1 minute`
+* **Notify** - "Every time alert is active"
+* **Environment** - `all`
+* **Is above** - `25 errors`
+* **For the last** - `1 minute`
-Select the **Email** action type and click **Create a connector**.
+Select the **Email** connector and click **Create a connector**.
Fill out the required details: sender, host, port, etc., and click **save**.
A default message is provided as a starting point for your alert.
@@ -109,14 +94,14 @@ to pass additional alert values at the time a condition is detected to an action
A list of available variables can be accessed by selecting the
**add variable** button image:apm/images/add-variable.png[add variable button].
-Select **Save**. The alert has been created and is now active!
+Click **Save**. The alert has been created and is now active!
[float]
[[apm-alert-manage]]
-=== Manage alerts and actions
+=== Manage alerts and rules
-From the APM app, select **Alerts** > **View active alerts** to be taken to the Kibana alerts and actions management page.
-From this page, you can create, edit, disable, mute, and delete alerts, and create, edit, and disable connectors.
+From the APM app, select **Alerts and rules** > **Manage rules** to be taken to the Kibana **Rules and Connectors** page.
+From this page, you can disable, mute, and delete APM alerts.
[float]
[[apm-alert-more-info]]
@@ -126,4 +111,4 @@ See {kibana-ref}/alerting-getting-started.html[alerting and actions] for more in
NOTE: If you are using an **on-premise** Elastic Stack deployment with security,
communication between Elasticsearch and Kibana must have TLS configured.
-More information is in the alerting {kibana-ref}/alerting-setup.html#alerting-prerequisites[prerequisites].
\ No newline at end of file
+More information is in the alerting {kibana-ref}/alerting-setup.html#alerting-prerequisites[prerequisites].
diff --git a/docs/apm/filters.asciidoc b/docs/apm/filters.asciidoc
index 56602ab7c05c9..c0ea81c87378b 100644
--- a/docs/apm/filters.asciidoc
+++ b/docs/apm/filters.asciidoc
@@ -36,6 +36,7 @@ It's vital to be consistent when naming environments in your agents.
To learn how to configure service environments, see the specific agent documentation:
* *Go:* {apm-go-ref}/configuration.html#config-environment[`ELASTIC_APM_ENVIRONMENT`]
+* *iOS agent:* _Not yet supported_
* *Java:* {apm-java-ref}/config-core.html#config-environment[`environment`]
* *.NET:* {apm-dotnet-ref}/config-core.html#config-environment[`Environment`]
* *Node.js:* {apm-node-ref}/configuration.html#environment[`environment`]
diff --git a/docs/apm/images/apm-agent-configuration.png b/docs/apm/images/apm-agent-configuration.png
index 07398f0609187..22fd9d75c3d73 100644
Binary files a/docs/apm/images/apm-agent-configuration.png and b/docs/apm/images/apm-agent-configuration.png differ
diff --git a/docs/apm/images/apm-alert.png b/docs/apm/images/apm-alert.png
index 2ac91b6b19219..a845d65dd24a5 100644
Binary files a/docs/apm/images/apm-alert.png and b/docs/apm/images/apm-alert.png differ
diff --git a/docs/apm/images/apm-error-group.png b/docs/apm/images/apm-error-group.png
index 359bdc6b704e9..1326e97f757d6 100644
Binary files a/docs/apm/images/apm-error-group.png and b/docs/apm/images/apm-error-group.png differ
diff --git a/docs/apm/images/apm-logs-tab.png b/docs/apm/images/apm-logs-tab.png
index 77aecf744bc7f..891d2b7a1dd69 100644
Binary files a/docs/apm/images/apm-logs-tab.png and b/docs/apm/images/apm-logs-tab.png differ
diff --git a/docs/apm/images/apm-services-overview.png b/docs/apm/images/apm-services-overview.png
index 1c16ac5b572c3..7aeb5f1ac379f 100644
Binary files a/docs/apm/images/apm-services-overview.png and b/docs/apm/images/apm-services-overview.png differ
diff --git a/docs/apm/images/apm-settings.png b/docs/apm/images/apm-settings.png
index c821b7fb76e79..2201ed5fcaa72 100644
Binary files a/docs/apm/images/apm-settings.png and b/docs/apm/images/apm-settings.png differ
diff --git a/docs/apm/images/apm-span-detail.png b/docs/apm/images/apm-span-detail.png
index bacb2d372c166..c9f55575b2232 100644
Binary files a/docs/apm/images/apm-span-detail.png and b/docs/apm/images/apm-span-detail.png differ
diff --git a/docs/apm/images/apm-traces.png b/docs/apm/images/apm-traces.png
index 0e9062ee448b4..ee16f9ed16a18 100644
Binary files a/docs/apm/images/apm-traces.png and b/docs/apm/images/apm-traces.png differ
diff --git a/docs/apm/images/apm-transaction-duration-dist.png b/docs/apm/images/apm-transaction-duration-dist.png
index 863f493f20db4..91ae6c3a59ad2 100644
Binary files a/docs/apm/images/apm-transaction-duration-dist.png and b/docs/apm/images/apm-transaction-duration-dist.png differ
diff --git a/docs/apm/images/apm-transaction-response-dist.png b/docs/apm/images/apm-transaction-response-dist.png
index 2f3e69f263a28..70e5ad7041287 100644
Binary files a/docs/apm/images/apm-transaction-response-dist.png and b/docs/apm/images/apm-transaction-response-dist.png differ
diff --git a/docs/apm/images/apm-transaction-sample.png b/docs/apm/images/apm-transaction-sample.png
index 0e4bc5f3f878a..54eea902f0311 100644
Binary files a/docs/apm/images/apm-transaction-sample.png and b/docs/apm/images/apm-transaction-sample.png differ
diff --git a/docs/apm/images/apm-transactions-overview.png b/docs/apm/images/apm-transactions-overview.png
index be292c37e24e0..66cf739a861b7 100644
Binary files a/docs/apm/images/apm-transactions-overview.png and b/docs/apm/images/apm-transactions-overview.png differ
diff --git a/docs/apm/images/service-maps-java.png b/docs/apm/images/service-maps-java.png
index d7c0786e406d9..25600b690a5bd 100644
Binary files a/docs/apm/images/service-maps-java.png and b/docs/apm/images/service-maps-java.png differ
diff --git a/docs/apm/images/service-maps.png b/docs/apm/images/service-maps.png
index 190b7af3c560e..511d8401b22f3 100644
Binary files a/docs/apm/images/service-maps.png and b/docs/apm/images/service-maps.png differ
diff --git a/docs/apm/service-maps.asciidoc b/docs/apm/service-maps.asciidoc
index 99a6205ae010e..f43253d819429 100644
--- a/docs/apm/service-maps.asciidoc
+++ b/docs/apm/service-maps.asciidoc
@@ -108,6 +108,7 @@ Service maps are supported for the following Agent versions:
[horizontal]
Go agent:: ≥ v1.7.0
+iOS agent:: _Not yet supported_
Java agent:: ≥ v1.13.0
.NET agent:: ≥ v1.3.0
Node.js agent:: ≥ v3.6.0
diff --git a/docs/apm/transactions.asciidoc b/docs/apm/transactions.asciidoc
index c2a3e0bc2502d..76006d375d075 100644
--- a/docs/apm/transactions.asciidoc
+++ b/docs/apm/transactions.asciidoc
@@ -100,22 +100,22 @@ the selected transaction group.
image::apm/images/apm-transaction-response-dist.png[Example view of response time distribution]
[[transaction-duration-distribution]]
-==== Transactions duration distribution
+==== Latency distribution
-This chart plots all transaction durations for the given time period.
+A plot of all transaction durations for the given time period.
The screenshot below shows a typical distribution,
and indicates most of our requests were served quickly -- awesome!
-It's the requests on the right, the ones taking longer than average, that we probably want to focus on.
+It's the requests on the right, the ones taking longer than average, that we probably need to focus on.
[role="screenshot"]
-image::apm/images/apm-transaction-duration-dist.png[Example view of transactions duration distribution graph]
+image::apm/images/apm-transaction-duration-dist.png[Example view of latency distribution graph]
-Select a transaction duration _bucket_ to display up to ten trace samples.
+Select a latency duration _bucket_ to display up to ten trace samples.
[[transaction-trace-sample]]
==== Trace sample
-Trace samples are based on the _bucket_ selection in the *Transactions duration distribution* chart;
+Trace samples are based on the _bucket_ selection in the *Latency distribution* chart;
update the samples by selecting a new _bucket_.
The number of requests per bucket is displayed when hovering over the graph,
and the selected bucket is highlighted to stand out.
diff --git a/docs/apm/troubleshooting.asciidoc b/docs/apm/troubleshooting.asciidoc
index 8cab7bb03da75..4a62f71528676 100644
--- a/docs/apm/troubleshooting.asciidoc
+++ b/docs/apm/troubleshooting.asciidoc
@@ -15,6 +15,7 @@ don't forget to check our other troubleshooting guides or discussion forum:
* {apm-server-ref}/troubleshooting.html[APM Server troubleshooting]
* {apm-dotnet-ref}/troubleshooting.html[.NET agent troubleshooting]
* {apm-go-ref}/troubleshooting.html[Go agent troubleshooting]
+* {apm-ios-ref}/troubleshooting.html[iOS agent troubleshooting]
* {apm-java-ref}/trouble-shooting.html[Java agent troubleshooting]
* {apm-node-ref}/troubleshooting.html[Node.js agent troubleshooting]
* {apm-php-ref}/troubleshooting.html[PHP agent troubleshooting]
diff --git a/docs/developer/plugin-list.asciidoc b/docs/developer/plugin-list.asciidoc
index eee92ba433721..5f49360c926bf 100644
--- a/docs/developer/plugin-list.asciidoc
+++ b/docs/developer/plugin-list.asciidoc
@@ -354,10 +354,6 @@ The plugin exposes the static DefaultEditorController class to consume.
The client-side plugin configures following values:
-|{kib-repo}blob/{branch}/x-pack/plugins/console_extensions/README.md[consoleExtensions]
-|This plugin provides autocomplete definitions of licensed APIs to the OSS Console plugin.
-
-
|{kib-repo}blob/{branch}/x-pack/plugins/cross_cluster_replication/README.md[crossClusterReplication]
|You can run a local cluster and simulate a remote cluster within a single Kibana directory.
@@ -393,7 +389,7 @@ security and spaces filtering as well as performing audit logging.
|{kib-repo}blob/{branch}/x-pack/plugins/enterprise_search/README.md[enterpriseSearch]
-|This plugin's goal is to provide a Kibana user interface to the Enterprise Search solution's products (App Search and Workplace Search). In it's current MVP state, the plugin provides the following with the goal of gathering user feedback and raising product awareness:
+|This plugin provides beta Kibana user interfaces for managing the Enterprise Search solution and its products, App Search and Workplace Search.
|{kib-repo}blob/{branch}/x-pack/plugins/event_log/README.md[eventLog]
diff --git a/docs/development/core/server/kibana-plugin-core-server.elasticsearchclientconfig.md b/docs/development/core/server/kibana-plugin-core-server.elasticsearchclientconfig.md
index a854e5ddad19a..208e0e0175d71 100644
--- a/docs/development/core/server/kibana-plugin-core-server.elasticsearchclientconfig.md
+++ b/docs/development/core/server/kibana-plugin-core-server.elasticsearchclientconfig.md
@@ -9,7 +9,7 @@ Configuration options to be used to create a [cluster client](./kibana-plugin-co
Signature:
```typescript
-export declare type ElasticsearchClientConfig = Pick & {
+export declare type ElasticsearchClientConfig = Pick & {
pingTimeout?: ElasticsearchConfig['pingTimeout'] | ClientOptions['pingTimeout'];
requestTimeout?: ElasticsearchConfig['requestTimeout'] | ClientOptions['requestTimeout'];
ssl?: Partial;
diff --git a/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.md b/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.md
index d87ea63d59b8d..a9ed614ba7552 100644
--- a/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.md
+++ b/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.md
@@ -31,10 +31,11 @@ export declare class ElasticsearchConfig
| [pingTimeout](./kibana-plugin-core-server.elasticsearchconfig.pingtimeout.md) | | Duration
| Timeout after which PING HTTP request will be aborted and retried. |
| [requestHeadersWhitelist](./kibana-plugin-core-server.elasticsearchconfig.requestheaderswhitelist.md) | | string[]
| List of Kibana client-side headers to send to Elasticsearch when request scoped cluster client is used. If this is an empty array then \*no\* client-side will be sent. |
| [requestTimeout](./kibana-plugin-core-server.elasticsearchconfig.requesttimeout.md) | | Duration
| Timeout after which HTTP request will be aborted and retried. |
+| [serviceAccountToken](./kibana-plugin-core-server.elasticsearchconfig.serviceaccounttoken.md) | | string
| If Elasticsearch security features are enabled, this setting provides the service account token that the Kibana server uses to perform its administrative functions. This is an alternative to specifying a username and password. |
| [shardTimeout](./kibana-plugin-core-server.elasticsearchconfig.shardtimeout.md) | | Duration
| Timeout for Elasticsearch to wait for responses from shards. Set to 0 to disable. |
| [sniffInterval](./kibana-plugin-core-server.elasticsearchconfig.sniffinterval.md) | | false | Duration
| Interval to perform a sniff operation and make sure the list of nodes is complete. If false
then sniffing is disabled. |
| [sniffOnConnectionFault](./kibana-plugin-core-server.elasticsearchconfig.sniffonconnectionfault.md) | | boolean
| Specifies whether the client should immediately sniff for a more current list of nodes when a connection dies. |
| [sniffOnStart](./kibana-plugin-core-server.elasticsearchconfig.sniffonstart.md) | | boolean
| Specifies whether the client should attempt to detect the rest of the cluster when it is first instantiated. |
| [ssl](./kibana-plugin-core-server.elasticsearchconfig.ssl.md) | | Pick<SslConfigSchema, Exclude<keyof SslConfigSchema, 'certificateAuthorities' | 'keystore' | 'truststore'>> & {
certificateAuthorities?: string[];
}
| Set of settings configure SSL connection between Kibana and Elasticsearch that are required when xpack.ssl.verification_mode
in Elasticsearch is set to either certificate
or full
. |
-| [username](./kibana-plugin-core-server.elasticsearchconfig.username.md) | | string
| If Elasticsearch is protected with basic authentication, this setting provides the username that the Kibana server uses to perform its administrative functions. |
+| [username](./kibana-plugin-core-server.elasticsearchconfig.username.md) | | string
| If Elasticsearch is protected with basic authentication, this setting provides the username that the Kibana server uses to perform its administrative functions. Cannot be used in conjunction with serviceAccountToken. |
diff --git a/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.serviceaccounttoken.md b/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.serviceaccounttoken.md
new file mode 100644
index 0000000000000..5934e83de17a4
--- /dev/null
+++ b/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.serviceaccounttoken.md
@@ -0,0 +1,15 @@
+
+
+[Home](./index.md) > [kibana-plugin-core-server](./kibana-plugin-core-server.md) > [ElasticsearchConfig](./kibana-plugin-core-server.elasticsearchconfig.md) > [serviceAccountToken](./kibana-plugin-core-server.elasticsearchconfig.serviceaccounttoken.md)
+
+## ElasticsearchConfig.serviceAccountToken property
+
+If Elasticsearch security features are enabled, this setting provides the service account token that the Kibana server uses to perform its administrative functions.
+
+This is an alternative to specifying a username and password.
+
+Signature:
+
+```typescript
+readonly serviceAccountToken?: string;
+```
diff --git a/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.username.md b/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.username.md
index 14db9f2e36ccf..959870ff43a0f 100644
--- a/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.username.md
+++ b/docs/development/core/server/kibana-plugin-core-server.elasticsearchconfig.username.md
@@ -4,7 +4,7 @@
## ElasticsearchConfig.username property
-If Elasticsearch is protected with basic authentication, this setting provides the username that the Kibana server uses to perform its administrative functions.
+If Elasticsearch is protected with basic authentication, this setting provides the username that the Kibana server uses to perform its administrative functions. Cannot be used in conjunction with serviceAccountToken.
Signature:
diff --git a/docs/development/core/server/kibana-plugin-core-server.legacyelasticsearchclientconfig.md b/docs/development/core/server/kibana-plugin-core-server.legacyelasticsearchclientconfig.md
index b028a09bee453..a80ebe2fee493 100644
--- a/docs/development/core/server/kibana-plugin-core-server.legacyelasticsearchclientconfig.md
+++ b/docs/development/core/server/kibana-plugin-core-server.legacyelasticsearchclientconfig.md
@@ -11,7 +11,7 @@
Signature:
```typescript
-export declare type LegacyElasticsearchClientConfig = Pick & Pick & {
+export declare type LegacyElasticsearchClientConfig = Pick & Pick & {
pingTimeout?: ElasticsearchConfig['pingTimeout'] | ConfigOptions['pingTimeout'];
requestTimeout?: ElasticsearchConfig['requestTimeout'] | ConfigOptions['requestTimeout'];
sniffInterval?: ElasticsearchConfig['sniffInterval'] | ConfigOptions['sniffInterval'];
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterntype.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterntype.md
new file mode 100644
index 0000000000000..46fd3a0725e40
--- /dev/null
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterntype.md
@@ -0,0 +1,19 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [IndexPatternType](./kibana-plugin-plugins-data-public.indexpatterntype.md)
+
+## IndexPatternType enum
+
+Signature:
+
+```typescript
+export declare enum IndexPatternType
+```
+
+## Enumeration Members
+
+| Member | Value | Description |
+| --- | --- | --- |
+| DEFAULT | "default"
| |
+| ROLLUP | "rollup"
| |
+
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterntypemeta.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterntypemeta.md
index e6690b244c9ea..19a884862d460 100644
--- a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterntypemeta.md
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterntypemeta.md
@@ -15,4 +15,5 @@ export interface TypeMeta
| Property | Type | Description |
| --- | --- | --- |
| [aggs](./kibana-plugin-plugins-data-public.indexpatterntypemeta.aggs.md) | Record<string, AggregationRestrictions>
| |
+| [params](./kibana-plugin-plugins-data-public.indexpatterntypemeta.params.md) | {
rollup_index: string;
}
| |
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterntypemeta.params.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterntypemeta.params.md
new file mode 100644
index 0000000000000..12646a39188a0
--- /dev/null
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterntypemeta.params.md
@@ -0,0 +1,13 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [IndexPatternTypeMeta](./kibana-plugin-plugins-data-public.indexpatterntypemeta.md) > [params](./kibana-plugin-plugins-data-public.indexpatterntypemeta.params.md)
+
+## IndexPatternTypeMeta.params property
+
+Signature:
+
+```typescript
+params?: {
+ rollup_index: string;
+ };
+```
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.md
index 65c4601d5faec..7c2911875ee05 100644
--- a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.md
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.md
@@ -31,6 +31,7 @@
| --- | --- |
| [BUCKET\_TYPES](./kibana-plugin-plugins-data-public.bucket_types.md) | |
| [ES\_FIELD\_TYPES](./kibana-plugin-plugins-data-public.es_field_types.md) | \* |
+| [IndexPatternType](./kibana-plugin-plugins-data-public.indexpatterntype.md) | |
| [KBN\_FIELD\_TYPES](./kibana-plugin-plugins-data-public.kbn_field_types.md) | \* |
| [METRIC\_TYPES](./kibana-plugin-plugins-data-public.metric_types.md) | |
| [QuerySuggestionTypes](./kibana-plugin-plugins-data-public.querysuggestiontypes.md) | |
diff --git a/docs/development/plugins/expressions/public/kibana-plugin-plugins-expressions-public.execution.md b/docs/development/plugins/expressions/public/kibana-plugin-plugins-expressions-public.execution.md
index edaf1c9a9ce9e..040bed5a8ce53 100644
--- a/docs/development/plugins/expressions/public/kibana-plugin-plugins-expressions-public.execution.md
+++ b/docs/development/plugins/expressions/public/kibana-plugin-plugins-expressions-public.execution.md
@@ -39,5 +39,5 @@ export declare class Execution N.B. input
is initialized to null
rather than undefined
for legacy reasons, because in legacy interpreter it was set to null
by default. |
+| [start(input, isSubExpression)](./kibana-plugin-plugins-expressions-public.execution.start.md) | | Call this method to start execution.N.B. input
is initialized to null
rather than undefined
for legacy reasons, because in legacy interpreter it was set to null
by default. |
diff --git a/docs/development/plugins/expressions/public/kibana-plugin-plugins-expressions-public.execution.start.md b/docs/development/plugins/expressions/public/kibana-plugin-plugins-expressions-public.execution.start.md
index 352226da6d72a..b1fa6d7d518b9 100644
--- a/docs/development/plugins/expressions/public/kibana-plugin-plugins-expressions-public.execution.start.md
+++ b/docs/development/plugins/expressions/public/kibana-plugin-plugins-expressions-public.execution.start.md
@@ -11,7 +11,7 @@ N.B. `input` is initialized to `null` rather than `undefined` for legacy reasons
Signature:
```typescript
-start(input?: Input): Observable>;
+start(input?: Input, isSubExpression?: boolean): Observable>;
```
## Parameters
@@ -19,6 +19,7 @@ start(input?: Input): Observable>
| Parameter | Type | Description |
| --- | --- | --- |
| input | Input
| |
+| isSubExpression | boolean
| |
Returns:
diff --git a/docs/development/plugins/expressions/server/kibana-plugin-plugins-expressions-server.execution.md b/docs/development/plugins/expressions/server/kibana-plugin-plugins-expressions-server.execution.md
index 47963e5e5ef46..44d16ea02e270 100644
--- a/docs/development/plugins/expressions/server/kibana-plugin-plugins-expressions-server.execution.md
+++ b/docs/development/plugins/expressions/server/kibana-plugin-plugins-expressions-server.execution.md
@@ -39,5 +39,5 @@ export declare class Execution N.B. input
is initialized to null
rather than undefined
for legacy reasons, because in legacy interpreter it was set to null
by default. |
+| [start(input, isSubExpression)](./kibana-plugin-plugins-expressions-server.execution.start.md) | | Call this method to start execution.N.B. input
is initialized to null
rather than undefined
for legacy reasons, because in legacy interpreter it was set to null
by default. |
diff --git a/docs/development/plugins/expressions/server/kibana-plugin-plugins-expressions-server.execution.start.md b/docs/development/plugins/expressions/server/kibana-plugin-plugins-expressions-server.execution.start.md
index 0eef7013cb3c6..23b4d414d09d1 100644
--- a/docs/development/plugins/expressions/server/kibana-plugin-plugins-expressions-server.execution.start.md
+++ b/docs/development/plugins/expressions/server/kibana-plugin-plugins-expressions-server.execution.start.md
@@ -11,7 +11,7 @@ N.B. `input` is initialized to `null` rather than `undefined` for legacy reasons
Signature:
```typescript
-start(input?: Input): Observable>;
+start(input?: Input, isSubExpression?: boolean): Observable>;
```
## Parameters
@@ -19,6 +19,7 @@ start(input?: Input): Observable>
| Parameter | Type | Description |
| --- | --- | --- |
| input | Input
| |
+| isSubExpression | boolean
| |
Returns:
diff --git a/docs/getting-started/images/add-sample-data.png b/docs/getting-started/images/add-sample-data.png
index 9dee27dcde71b..07a536b19d7d0 100644
Binary files a/docs/getting-started/images/add-sample-data.png and b/docs/getting-started/images/add-sample-data.png differ
diff --git a/docs/getting-started/images/tutorial-discover-3.png b/docs/getting-started/images/tutorial-discover-3.png
index b024ad6dc39fe..79cf94058bb76 100644
Binary files a/docs/getting-started/images/tutorial-discover-3.png and b/docs/getting-started/images/tutorial-discover-3.png differ
diff --git a/docs/getting-started/images/tutorial-discover-4.png b/docs/getting-started/images/tutorial-discover-4.png
index 945a6155c02cd..584221e8cfd04 100644
Binary files a/docs/getting-started/images/tutorial-discover-4.png and b/docs/getting-started/images/tutorial-discover-4.png differ
diff --git a/docs/getting-started/images/tutorial-final-dashboard.gif b/docs/getting-started/images/tutorial-final-dashboard.gif
index 53b7bc04c5f65..6c82c4e53ca10 100644
Binary files a/docs/getting-started/images/tutorial-final-dashboard.gif and b/docs/getting-started/images/tutorial-final-dashboard.gif differ
diff --git a/docs/getting-started/images/tutorial-sample-dashboard.png b/docs/getting-started/images/tutorial-sample-dashboard.png
index 4c95c04c5e43e..5e06009d0824e 100644
Binary files a/docs/getting-started/images/tutorial-sample-dashboard.png and b/docs/getting-started/images/tutorial-sample-dashboard.png differ
diff --git a/docs/getting-started/images/tutorial-sample-filter.png b/docs/getting-started/images/tutorial-sample-filter.png
index 56ebacadbef45..8823da311ebb5 100644
Binary files a/docs/getting-started/images/tutorial-sample-filter.png and b/docs/getting-started/images/tutorial-sample-filter.png differ
diff --git a/docs/getting-started/images/tutorial-sample-filter2.png b/docs/getting-started/images/tutorial-sample-filter2.png
index 21402feacdecd..4215b63d89fa1 100644
Binary files a/docs/getting-started/images/tutorial-sample-filter2.png and b/docs/getting-started/images/tutorial-sample-filter2.png differ
diff --git a/docs/getting-started/images/tutorial-sample-query.png b/docs/getting-started/images/tutorial-sample-query.png
deleted file mode 100644
index 4f1ca24924b28..0000000000000
Binary files a/docs/getting-started/images/tutorial-sample-query.png and /dev/null differ
diff --git a/docs/getting-started/images/tutorial-sample-query2.png b/docs/getting-started/images/tutorial-sample-query2.png
deleted file mode 100644
index 0e91e1069a201..0000000000000
Binary files a/docs/getting-started/images/tutorial-sample-query2.png and /dev/null differ
diff --git a/docs/getting-started/images/tutorial-treemap.png b/docs/getting-started/images/tutorial-treemap.png
deleted file mode 100644
index 32e14fd2308e3..0000000000000
Binary files a/docs/getting-started/images/tutorial-treemap.png and /dev/null differ
diff --git a/docs/getting-started/images/tutorial-visualization-dropdown.png b/docs/getting-started/images/tutorial-visualization-dropdown.png
index 29d1b99700964..a069af95ed14a 100644
Binary files a/docs/getting-started/images/tutorial-visualization-dropdown.png and b/docs/getting-started/images/tutorial-visualization-dropdown.png differ
diff --git a/docs/getting-started/images/tutorial-visualization-treemap.png b/docs/getting-started/images/tutorial-visualization-treemap.png
new file mode 100644
index 0000000000000..c6e8db133cb44
Binary files /dev/null and b/docs/getting-started/images/tutorial-visualization-treemap.png differ
diff --git a/docs/getting-started/quick-start-guide.asciidoc b/docs/getting-started/quick-start-guide.asciidoc
index d9835b312f3ee..ed249008ac8de 100644
--- a/docs/getting-started/quick-start-guide.asciidoc
+++ b/docs/getting-started/quick-start-guide.asciidoc
@@ -7,7 +7,7 @@ When you've finished, you'll know how to:
* <>
-* <>
+* <>
[float]
=== Required privileges
@@ -24,125 +24,125 @@ include::{docs-root}/shared/cloud/ess-getting-started.asciidoc[]
[[gs-get-data-into-kibana]]
== Add the sample data
-Sample data sets come with sample visualizations, dashboards, and more to help you explore {kib} without adding your own data.
+Sample data sets come with sample visualizations, dashboards, and more to help you explore {kib} before you ingest or add your own data.
-. From the home page, click *Try our sample data*.
+. On the home page, click *Try our sample data*.
. On the *Sample eCommerce orders* card, click *Add data*.
+
[role="screenshot"]
-image::getting-started/images/add-sample-data.png[Add data UI]
+image::images/add-sample-data.png[Add data UI for the sample data sets]
[float]
[[explore-the-data]]
== Explore the data
-*Discover* displays an interactive histogram that shows the distribution of of data, or documents, over time, and a table that lists the fields for each document that matches the index. By default, all fields are shown for each matching document.
+*Discover* displays the data in an interactive histogram that shows the distribution of data, or documents, over time, and a table that lists the fields for each document that matches the index pattern. To view a subset of the documents, you can apply filters to the data, and customize the table to display only the fields you want to explore.
. Open the main menu, then click *Discover*.
. Change the <> to *Last 7 days*.
+
[role="screenshot"]
-image::images/tutorial-discover-2.png[]
+image::images/tutorial-discover-2.png[Time filter menu with Last 7 days filter configured]
-. To focus in on the documents you want to view, use the <>. In the *KQL* search field, enter:
+. To view the sales orders for women's clothing that are $60 or more, use the <> search field:
+
[source,text]
-products.taxless_price >= 60 AND category : Women's Clothing
-+
-The query returns the women's clothing orders for $60 and more.
+products.taxless_price >= 60 and category : Women's Clothing
+
[role="screenshot"]
-image::images/tutorial-discover-4.png[]
+image::images/tutorial-discover-4.png[Discover table that displays only the orders for women's clothing that are $60 or more]
-. Hover over the list of *Available fields*, then click *+* next to the fields you want to view in the table.
-+
-For example, when you add the *category* field, the table displays the product categories for the orders.
+. To view only the product categories that contain sales orders, hover over the *category* field, then click *+*.
+
[role="screenshot"]
-image::images/tutorial-discover-3.png[]
-+
-For more information, refer to <>.
+image::images/tutorial-discover-3.png[Discover table that displays only the product categories that contain orders]
[float]
[[view-and-analyze-the-data]]
== View and analyze the data
-A dashboard is a collection of panels that you can use to view and analyze the data. Panels contain visualizations, interactive controls, Markdown, and more.
+A dashboard is a collection of panels that you can use to view and analyze the data. Panels contain visualizations, interactive controls, text, and more.
. Open the main menu, then click *Dashboard*.
. Click *[eCommerce] Revenue Dashboard*.
+
[role="screenshot"]
-image::getting-started/images/tutorial-sample-dashboard.png[]
+image::getting-started/images/tutorial-sample-dashboard.png[The [eCommerce] Revenue Dashboard that comes with the Sample eCommerce order data set]
[float]
-[[filter-and-query-the-data]]
-=== Filter the data
+[[create-a-visualization]]
+=== Create a visualization panel
+
+Create a treemap panel that shows the top sales regions and manufacturers, then add the panel to the dashboard.
-To focus in on the data you want to view on the dashboard, use filters.
+. From the toolbar, click *Edit*, then click *Create visualization*.
-. From the *[eCommerce] Controls* panel, make a selection from the *Manufacturer* and *Category* dropdowns, then click *Apply changes*.
+. Open the *Chart type* menu, then select *Treemap*.
+
-For example, the following dashboard shows the data for women's clothing from Gnomehouse.
+[role="screenshot"]
+image::getting-started/images/tutorial-visualization-dropdown.png[Chart type menu with Treemap selected]
+
+. From the *Available fields* list, drag and drop the following fields onto the workspace:
+
+* *geoip.city_name*
+
+* *manufacturer.keyword*
+
[role="screenshot"]
-image::getting-started/images/tutorial-sample-filter.png[]
+image::getting-started/images/tutorial-visualization-treemap.png[Treemap that displays Top values of geoip.city_name and Top values of manufacturer.keyword fields]
-. To manually add a filter, click *Add filter*, then specify the options.
+. Click *Save and return*.
+
-For example, to view the orders for Wednesday, select *day_of_week* from the *Field* dropdown, select *is* from the *Operator* dropdown, then select *Wednesday* from the *Value* dropdown.
+The treemap appears as the last visualization panel on the dashboard.
+
[role="screenshot"]
-image::getting-started/images/tutorial-sample-filter2.png[]
+image::getting-started/images/tutorial-final-dashboard.gif[Final dashboard with new treemap visualization]
+
+[float]
+[[interact-with-the-data]]
+=== Interact with the data
+
+You can interact with the dashboard data using controls that allow you to apply dashboard-level filters. Interact with the *[eCommerce] Controls* panel to view the women's clothing data from the Gnomehouse manufacturer.
-. When you are done, remove the filters.
+. From the *Manufacturer* dropdown, select *Gnomehouse*.
+
+. From the *Category* dropdown, select *Women's Clothing*.
+
+. Click *Apply changes*.
+
-For more information, refer to <>.
+[role="screenshot"]
+image::getting-started/images/tutorial-sample-filter.png[The [eCommerce] Revenue Dashboard that shows only the women's clothing data from the Gnomehouse manufacturer]
[float]
-[[create-a-visualization]]
-=== Create a visualization panel
-
-Create a treemap panel that shows the top regions and manufacturers, then add the panel to the dashboard.
+[[filter-and-query-the-data]]
+=== Filter the data
-. From the toolbar, click *Edit*, then click *Create new*.
+To view a subset of the data, you can apply filters to the dashboard panels. Apply a filter to view the women's clothing data generated on Wednesday from the Gnomehouse manufacturer.
-. On the *New Visualization* window, click *Lens*.
+. Click *Add filter*.
-. From the *Available fields* list, drag and drop the following fields to the visualization builder:
+. From the *Field* dropdown, select *day_of_week*.
-* *geoip.city_name*
+. From the *Operator* dropdown, select *is*.
-* *manufacturer.keyword*
-+
-. From the visualization dropdown, select *Treemap*.
-+
-[role="screenshot"]
-image::getting-started/images/tutorial-visualization-dropdown.png[Visualization dropdown with Treemap selected]
+. From the *Value* dropdown, select *Wednesday*.
. Click *Save*.
-
-. On the *Save Lens visualization*, enter a title and make sure *Add to Dashboard after saving* is selected, then click *Save and return*.
-+
-The treemap appears as the last visualization panel on the dashboard.
+
[role="screenshot"]
-image::getting-started/images/tutorial-final-dashboard.gif[Final dashboard with new treemap visualization]
-+
-For more information, refer to <>.
+image::getting-started/images/tutorial-sample-filter2.png[The [eCommerce] Revenue Dashboard that shows only the women's clothing data generated on Wednesday from the Gnomehouse manufacturer]
[float]
[[quick-start-whats-next]]
== What's next?
-If you are you ready to add your own data, refer to <>.
+*Add your own data.* Ready to add your own data? Go to {fleet-guide}/fleet-quick-start.html[Quick start: Get logs and metrics into the Elastic Stack] to learn how to ingest your data, or go to <> and learn about all the other ways you can add data.
-If you want to ingest your data, refer to {fleet-guide}/fleet-quick-start.html[Quick start: Get logs and metrics into the Elastic Stack].
+*Explore your own data in Discover.* Ready to learn more about exploring your data in *Discover*? Go to <>.
-If you want to secure access to your data, refer to our guide on <>
+*Create a dashboard with your own data.* Ready to learn more about analyzing your data in *Dashboard*? Go to <>.
-If you want to try out {ml-features} with the sample data sets, refer to
-{ml-docs}/ml-getting-started.html[Getting started with {ml}].
\ No newline at end of file
+*Try out the {ml-features}.* Ready to analyze the sample data sets and generate models for their patterns of behavior? Go to {ml-docs}/ml-getting-started.html[Getting started with {ml}].
\ No newline at end of file
diff --git a/docs/management/images/management-create-rollup-bar-chart.png b/docs/management/images/management-create-rollup-bar-chart.png
index 324cfcb9ee5fb..68ba4344c0ecf 100644
Binary files a/docs/management/images/management-create-rollup-bar-chart.png and b/docs/management/images/management-create-rollup-bar-chart.png differ
diff --git a/docs/management/images/management-rollup-index-pattern.png b/docs/management/images/management-rollup-index-pattern.png
index 57ac00be7977c..de7976e63f050 100644
Binary files a/docs/management/images/management-rollup-index-pattern.png and b/docs/management/images/management-rollup-index-pattern.png differ
diff --git a/docs/management/images/management_create_rollup_job.png b/docs/management/images/management_create_rollup_job.png
index c3139c9f8df1a..f1dd1580c3a86 100644
Binary files a/docs/management/images/management_create_rollup_job.png and b/docs/management/images/management_create_rollup_job.png differ
diff --git a/docs/management/images/management_rollup_job_dashboard.png b/docs/management/images/management_rollup_job_dashboard.png
index d3c394183cad8..9573ab7a863e8 100644
Binary files a/docs/management/images/management_rollup_job_dashboard.png and b/docs/management/images/management_rollup_job_dashboard.png differ
diff --git a/docs/management/images/management_rollup_job_details.png b/docs/management/images/management_rollup_job_details.png
index e6e93a6dae130..5372ba4ad7d13 100644
Binary files a/docs/management/images/management_rollup_job_details.png and b/docs/management/images/management_rollup_job_details.png differ
diff --git a/docs/management/images/management_rollup_job_vis.png b/docs/management/images/management_rollup_job_vis.png
deleted file mode 100644
index e1f46e4db5c0a..0000000000000
Binary files a/docs/management/images/management_rollup_job_vis.png and /dev/null differ
diff --git a/docs/management/images/management_rollup_list.png b/docs/management/images/management_rollup_list.png
index 60e9a35071003..505930bcb9a38 100644
Binary files a/docs/management/images/management_rollup_list.png and b/docs/management/images/management_rollup_list.png differ
diff --git a/docs/management/rollups/create_and_manage_rollups.asciidoc b/docs/management/rollups/create_and_manage_rollups.asciidoc
index bde2ca472b258..51821a935d3f5 100644
--- a/docs/management/rollups/create_and_manage_rollups.asciidoc
+++ b/docs/management/rollups/create_and_manage_rollups.asciidoc
@@ -64,13 +64,16 @@ You can read more at {ref}/rollup-job-config.html[rollup job configuration].
=== Try it: Create and visualize rolled up data
This example creates a rollup job to capture log data from sample web logs.
-To follow along, add the sample web logs data set.
+Before you start, <>.
In this example, you want data that is older than 7 days in the target index pattern `kibana_sample_data_logs`
-to roll up once a day into the index `rollup_logstash`. You’ll bucket the
+to roll up into the `rollup_logstash` index. You’ll bucket the
rolled up data on an hourly basis, using 60m for the time bucket configuration.
This allows for more granular queries, such as 2h and 12h.
+For this example, the job will perform the rollup every minute. However, you'd
+typically roll up less frequently in production.
+
[float]
==== Create the rollup job
@@ -80,7 +83,7 @@ As you walk through the *Create rollup job* UI, enter the data:
|*Field* |*Value*
|Name
-|logs_job
+|`logs_job`
|Index pattern
|`kibana_sample_data_logs`
@@ -89,12 +92,13 @@ As you walk through the *Create rollup job* UI, enter the data:
|`rollup_logstash`
|Frequency
-|Every day at midnight
+|Every minute
|Page size
|1000
-|Delay (latency buffer)|7d
+|Latency buffer
+|7d
|Date field
|@timestamp
@@ -118,6 +122,8 @@ As you walk through the *Create rollup job* UI, enter the data:
|bytes (average)
|===
+On the **Review and save** page, click **Start job now** and **Save**.
+
The terms, histogram, and metrics fields reflect
the key information to retain in the rolled up data: where visitors are from (geo.src),
what operating system they are using (machine.os.keyword),
@@ -133,7 +139,6 @@ rollup index, or you can remove or archive it using < Index Patterns*.
. Click *Create index pattern*, and select *Rollup index pattern* from the dropdown.
@@ -149,7 +154,11 @@ is `rollup_logstash,kibana_sample_data_logs`. In this index pattern, `rollup_log
matches the rolled up index pattern and `kibana_sample_data_logs` matches the index
pattern for raw data.
-. Open the main menu, click *Dashboard*, then create and add a vertical bar chart.
+. Open the main menu, click *Dashboard*, then *Create dashboard*.
+
+. Set the <> to *Last 90 days*.
+
+. On the dashboard, click *Create visualization*.
. Choose `rollup_logstash,kibana_sample_data_logs`
as your source to see both the raw and rolled up data.
@@ -157,13 +166,15 @@ as your source to see both the raw and rolled up data.
[role="screenshot"]
image::images/management-create-rollup-bar-chart.png[][Create visualization of rolled up data]
-. Look at the data in your visualization.
-+
-[role="screenshot"]
-image::images/management_rollup_job_vis.png[][Visualization of rolled up data]
+. Select *Bar vertical stacked* in the chart type dropdown.
-. Optionally, create a dashboard that contains visualizations of the rolled up
-data, raw data, or both.
+. Add the `@timestamp` field to the *Horizontal axis*.
+
+. Add the `bytes` field to the *Vertical axis*, defaulting to an `Average of
+bytes`.
++
+{kib} creates a vertical bar chart of your data. Select a section of the chart
+to zoom in.
+
[role="screenshot"]
image::images/management_rollup_job_dashboard.png[][Dashboard with rolled up data]
diff --git a/docs/management/snapshot-restore/images/create-policy-example.png b/docs/management/snapshot-restore/images/create-policy-example.png
old mode 100755
new mode 100644
index e871c925f5fd5..4ab5e438b306b
Binary files a/docs/management/snapshot-restore/images/create-policy-example.png and b/docs/management/snapshot-restore/images/create-policy-example.png differ
diff --git a/docs/management/snapshot-restore/images/create-policy.png b/docs/management/snapshot-restore/images/create-policy.png
old mode 100755
new mode 100644
index d9a0dce0f4190..3ba33e2522bd5
Binary files a/docs/management/snapshot-restore/images/create-policy.png and b/docs/management/snapshot-restore/images/create-policy.png differ
diff --git a/docs/management/snapshot-restore/images/create_snapshot.png b/docs/management/snapshot-restore/images/create_snapshot.png
deleted file mode 100644
index 14c1229a23ce1..0000000000000
Binary files a/docs/management/snapshot-restore/images/create_snapshot.png and /dev/null differ
diff --git a/docs/management/snapshot-restore/images/register_repo.png b/docs/management/snapshot-restore/images/register_repo.png
old mode 100755
new mode 100644
index 9e7ee9db4ce91..c742028ce108c
Binary files a/docs/management/snapshot-restore/images/register_repo.png and b/docs/management/snapshot-restore/images/register_repo.png differ
diff --git a/docs/management/snapshot-restore/images/repository_list.png b/docs/management/snapshot-restore/images/repository_list.png
old mode 100755
new mode 100644
index a4678e87bfb2c..c4eb4fc1a3d1a
Binary files a/docs/management/snapshot-restore/images/repository_list.png and b/docs/management/snapshot-restore/images/repository_list.png differ
diff --git a/docs/management/snapshot-restore/images/restore-status.png b/docs/management/snapshot-restore/images/restore-status.png
deleted file mode 100755
index fa48e32d2fef3..0000000000000
Binary files a/docs/management/snapshot-restore/images/restore-status.png and /dev/null differ
diff --git a/docs/management/snapshot-restore/images/snapshot-restore.png b/docs/management/snapshot-restore/images/snapshot-restore.png
old mode 100755
new mode 100644
index 41a292f97c853..8ca5dc95e5892
Binary files a/docs/management/snapshot-restore/images/snapshot-restore.png and b/docs/management/snapshot-restore/images/snapshot-restore.png differ
diff --git a/docs/management/snapshot-restore/images/snapshot-retention.png b/docs/management/snapshot-restore/images/snapshot-retention.png
old mode 100755
new mode 100644
index 7b390357a21b6..44dfecc1a3321
Binary files a/docs/management/snapshot-restore/images/snapshot-retention.png and b/docs/management/snapshot-restore/images/snapshot-retention.png differ
diff --git a/docs/management/snapshot-restore/images/snapshot_details.png b/docs/management/snapshot-restore/images/snapshot_details.png
old mode 100755
new mode 100644
index 2bd226eecd84e..e6c463d7acb7f
Binary files a/docs/management/snapshot-restore/images/snapshot_details.png and b/docs/management/snapshot-restore/images/snapshot_details.png differ
diff --git a/docs/management/snapshot-restore/images/snapshot_list.png b/docs/management/snapshot-restore/images/snapshot_list.png
old mode 100755
new mode 100644
index dcbb43ec2ab84..f844bfddac4be
Binary files a/docs/management/snapshot-restore/images/snapshot_list.png and b/docs/management/snapshot-restore/images/snapshot_list.png differ
diff --git a/docs/management/snapshot-restore/images/snapshot_permissions.png b/docs/management/snapshot-restore/images/snapshot_permissions.png
deleted file mode 100644
index 463d4d6e389c6..0000000000000
Binary files a/docs/management/snapshot-restore/images/snapshot_permissions.png and /dev/null differ
diff --git a/docs/management/snapshot-restore/index.asciidoc b/docs/management/snapshot-restore/index.asciidoc
index 62633441ef161..b041bd0873a05 100644
--- a/docs/management/snapshot-restore/index.asciidoc
+++ b/docs/management/snapshot-restore/index.asciidoc
@@ -2,8 +2,8 @@
[[snapshot-repositories]]
== Snapshot and Restore
-*Snapshot and Restore* enables you to backup your {es}
-indices and clusters using data and state snapshots.
+*Snapshot and Restore* lets you back up a running {es}
+cluster using data and state snapshots.
Snapshots are important because they provide a copy of your data in case
something goes wrong. If you need to roll back to an older version of your data,
you can restore a snapshot from the repository.
@@ -34,17 +34,12 @@ The minimum required permissions to access *Snapshot and Restore* include:
To add privileges, open the main menu, then click *Stack Management > Roles*.
-[role="screenshot"]
-image:management/snapshot-restore/images/snapshot_permissions.png["Edit Role"]
-
[float]
[[kib-snapshot-register-repository]]
=== Register a repository
A repository is where your snapshots live. You must register a snapshot
repository before you can perform snapshot and restore operations.
-If you don't have a repository, Kibana walks you through the process of
-registering one.
{kib} supports three repository types
out of the box: shared file system, read-only URL, and source-only.
For more information on these repositories and their settings,
@@ -52,11 +47,9 @@ see {ref}/snapshots-register-repository.html[Repositories].
To use other repositories, such as S3, see
{ref}/snapshots-register-repository.html#snapshots-repository-plugins[Repository plugins].
-
-Once you create a repository, it is listed in the *Repositories*
-view.
-Click a repository name to view its type, number of snapshots, and settings,
-and to verify status.
+The *Repositories* view displays a list of registered repositories. Click a
+repository name to view information about the repository, verify its status, or
+clean it up.
[role="screenshot"]
image:management/snapshot-restore/images/repository_list.png["Repository list"]
@@ -73,15 +66,8 @@ into each snapshot for further investigation.
[role="screenshot"]
image:management/snapshot-restore/images/snapshot_details.png["Snapshot details"]
-If you don’t have any snapshots, you can create them from the {kib} <>. The
-{ref}/snapshots-take-snapshot.html[snapshot API]
-takes the current state and data in your index or cluster, and then saves it to a
-shared repository.
-
-The snapshot process is "smart." Your first snapshot is a complete copy of
-the data in your index or cluster.
-All subsequent snapshots save the changes between the existing snapshots and
-the new data.
+If you don’t have any snapshots, you can create them using the
+{ref}/create-snapshot-api.html[create snapshot API].
[float]
[[kib-restore-snapshot]]
@@ -93,14 +79,14 @@ restore a snapshot made from one cluster to another cluster. You might
use the restore operation to:
* Recover data lost due to a failure
-* Migrate a current Elasticsearch cluster to a new version
+* Migrate an {es} cluster to a new version
* Move data from one cluster to another cluster
To get started, go to the *Snapshots* view, find the
snapshot, and click the restore icon in the *Actions* column.
The Restore wizard presents
options for the restore operation, including which
-indices to restore and whether to modify the index settings.
+data streams and indices to restore and whether to change index settings.
You can restore an existing index only if it’s closed and has the same
number of shards as the index in the snapshot.
@@ -119,7 +105,7 @@ Use a {ref}/snapshot-lifecycle-management-api.html[snapshot lifecycle policy]
to automate the creation and deletion
of cluster snapshots. Taking automatic snapshots:
-* Ensures your {es} indices and clusters are backed up on a regular basis
+* Ensures your {es} data is backed up on a regular basis
* Ensures a recent and relevant snapshot is available if a situation
arises where a cluster needs to be recovered
* Allows you to manage your snapshots in {kib}, instead of using a
@@ -138,8 +124,8 @@ You can drill down into each policy to examine its settings and last successful
You can perform the following actions on a snapshot policy:
-* *Run* a policy immediately without waiting for the scheduled time.
-This action is useful before an upgrade or before performing maintenance on indices.
+* *Run* a policy immediately without waiting for the scheduled time. This action
+is useful before an upgrade or before performing maintenance.
* *Edit* a policy and immediately apply changes to the schedule.
* *Delete* a policy to prevent any future snapshots from being taken.
This action does not cancel any currently ongoing snapshots or remove any previously taken snapshots.
@@ -160,7 +146,7 @@ and then click *Delete snapshots*.
[role="xpack"]
[[snapshot-restore-tutorial]]
-=== Tutorial: Snapshot and Restore
+=== Tutorial: Snapshot and Restore
Ready to try *Snapshot and Restore*? In this tutorial, you'll learn to:
@@ -174,15 +160,12 @@ Ready to try *Snapshot and Restore*? In this tutorial, you'll learn to:
This example shows you how to register a shared file system repository
and store snapshots.
-Before you begin, you must register the location of the repository in the
-{ref}/snapshots-register-repository.html#snapshots-filesystem-repository[path.repo] setting on
-your master and data nodes. You can do this in one of two ways:
-* Edit your `elasticsearch.yml` to include the `path.repo` setting.
-
-* Pass the `path.repo` setting when you start Elasticsearch.
-+
-`bin/elasticsearch -E path.repo=/tmp/es-backups`
+Before you begin, you must first mount the file system to the same location on
+all master and data nodes. Then add the file system’s path or parent directory
+to the
+{ref}/snapshots-register-repository.html#snapshots-filesystem-repository[`path.repo`]
+setting in `elasticsearch.yml` for each master and data node.
[float]
[[register-repo-example]]
@@ -216,13 +199,10 @@ Use the {ref}/snapshots-take-snapshot.html[snapshot API] to create a snapshot.
. Create the snapshot:
+
[source,js]
-PUT /_snapshot/my_backup/2019-04-25_snapshot?wait_for_completion=true
+PUT /_snapshot/my_backup/2099-04-25_snapshot?wait_for_completion=true
+
-In this example, the snapshot name is `2019-04-25_snapshot`. You can also
+In this example, the snapshot name is `2099-04-25_snapshot`. You can also
use {ref}/date-math-index-names.html[date math expression] for the snapshot name.
-+
-[role="screenshot"]
-image:management/snapshot-restore/images/create_snapshot.png["Create snapshot"]
. Return to *Snapshot and Restore*.
+
@@ -251,16 +231,17 @@ image:management/snapshot-restore/images/create-policy-example.png["Create polic
|Snapshot name
|``
-|Schedule
-|Every day at 1:30 a.m.
-
|Repository
|`my_backup`
+|Schedule
+|Every day at 1:30 a.m.
+
|*Snapshot settings* |
-|Indices
-|Select the indices to back up. By default, all indices, including system indices, are backed up.
+|Data streams and indices
+|Select the data streams and indices to back up. By default, all data streams
+and indices, including system indices, are backed up.
|All other settings
|Use the defaults.
@@ -280,20 +261,22 @@ Your new policy is listed in the *Policies* view, and you see a summary of its d
[[restore-snapshot-example]]
==== Restore a snapshot
-Finally, you'll restore indices from an existing snapshot.
+Finally, you'll restore data streams and indices from an existing snapshot.
-. In the *Snapshots* view, find the snapshot you want to restore, for example `2019-04-25_snapshot`.
+. In the *Snapshots* view, find the snapshot you want to restore, for example `2099-04-25_snapshot`.
. Click the restore icon in the *Actions* column.
. As you walk through the wizard, enter the following values:
+
|===
|*Logistics* |
-|Indices
-|Toggle to choose specific indices to restore, or leave in place to restore all indices.
+|Data streams and indices
+|Toggle to choose specific data streams and indices to restore. Use the default
+to restore all data streams and indices in the snapshot.
-|Rename indices
-|Toggle to give your restored indices new names, or leave in place to restore under original index names.
+|Rename data streams and indices
+|Toggle to give your restored data streams and indices new names. Use the
+default to restore the original data stream and index names.
|All other fields
|Use the defaults.
@@ -313,4 +296,4 @@ or leave in place to keep existing settings.
+
The operation loads for a few seconds,
and then you’re navigated to *Restore Status*,
-where you can monitor the status of your restored indices.
+where you can monitor the status of your restored data streams and indices.
diff --git a/docs/maps/images/gs_dashboard_with_map.png b/docs/maps/images/gs_dashboard_with_map.png
index 49b71c16c12b2..a4bf95948edf0 100644
Binary files a/docs/maps/images/gs_dashboard_with_map.png and b/docs/maps/images/gs_dashboard_with_map.png differ
diff --git a/docs/maps/images/gs_dashboard_with_terms_filter.png b/docs/maps/images/gs_dashboard_with_terms_filter.png
index 21b5c044cb35d..bf84b2ee371af 100644
Binary files a/docs/maps/images/gs_dashboard_with_terms_filter.png and b/docs/maps/images/gs_dashboard_with_terms_filter.png differ
diff --git a/docs/maps/images/layer_search.png b/docs/maps/images/layer_search.png
index 8e0e8ff628953..d3828ed5f4551 100644
Binary files a/docs/maps/images/layer_search.png and b/docs/maps/images/layer_search.png differ
diff --git a/docs/maps/images/quantitative_data_driven_styling.png b/docs/maps/images/quantitative_data_driven_styling.png
index a7852ed202016..03dc22f433eee 100644
Binary files a/docs/maps/images/quantitative_data_driven_styling.png and b/docs/maps/images/quantitative_data_driven_styling.png differ
diff --git a/docs/maps/images/sample_data_ecommerce.png b/docs/maps/images/sample_data_ecommerce.png
index 5b261bb535022..7fba3da608d15 100644
Binary files a/docs/maps/images/sample_data_ecommerce.png and b/docs/maps/images/sample_data_ecommerce.png differ
diff --git a/docs/maps/images/sample_data_web_logs.png b/docs/maps/images/sample_data_web_logs.png
index f4f4de88f1992..e4902c3e89610 100644
Binary files a/docs/maps/images/sample_data_web_logs.png and b/docs/maps/images/sample_data_web_logs.png differ
diff --git a/docs/maps/images/vector_style_class.png b/docs/maps/images/vector_style_class.png
index 8c685dfcf0ab6..69549b9f5f2d8 100644
Binary files a/docs/maps/images/vector_style_class.png and b/docs/maps/images/vector_style_class.png differ
diff --git a/docs/maps/images/vector_style_dynamic.png b/docs/maps/images/vector_style_dynamic.png
index aeaef412b5220..3032e74180afa 100644
Binary files a/docs/maps/images/vector_style_dynamic.png and b/docs/maps/images/vector_style_dynamic.png differ
diff --git a/docs/maps/images/vector_style_static.png b/docs/maps/images/vector_style_static.png
index 47d9c3b21fcb6..34908aa02fac7 100644
Binary files a/docs/maps/images/vector_style_static.png and b/docs/maps/images/vector_style_static.png differ
diff --git a/docs/maps/vector-layer.asciidoc b/docs/maps/vector-layer.asciidoc
index 2115c16a889c6..5017ecf91dffd 100644
--- a/docs/maps/vector-layer.asciidoc
+++ b/docs/maps/vector-layer.asciidoc
@@ -20,10 +20,10 @@ The index must contain at least one field mapped as {ref}/geo-point.html[geo_poi
Results are limited to the `index.max_result_window` index setting, which defaults to 10000.
Select the appropriate *Scaling* option for your use case.
+
-* *Limit results to 10000.* The layer displays features from the first `index.max_result_window` documents.
+* *Limit results to 10,000* The layer displays features from the first `index.max_result_window` documents.
Results exceeding `index.max_result_window` are not displayed.
-* *Show clusters when results exceed 10000.* When results exceed `index.max_result_window`, the layer uses {ref}/search-aggregations-bucket-geotilegrid-aggregation.html[GeoTile grid aggregation] to group your documents into clusters and displays metrics for each cluster. When results are less then `index.max_result_window`, the layer displays features from individual documents.
+* *Show clusters when results exceed 10,000* When results exceed `index.max_result_window`, the layer uses {ref}/search-aggregations-bucket-geotilegrid-aggregation.html[GeoTile grid aggregation] to group your documents into clusters and displays metrics for each cluster. When results are less than `index.max_result_window`, the layer displays features from individual documents.
* *Use vector tiles.* Vector tiles partition your map into 6 to 8 tiles.
Each tile request is limited to the `index.max_result_window` index setting.
diff --git a/docs/settings/apm-settings.asciidoc b/docs/settings/apm-settings.asciidoc
index 79fa9a642428a..dfb239f0e26c0 100644
--- a/docs/settings/apm-settings.asciidoc
+++ b/docs/settings/apm-settings.asciidoc
@@ -18,7 +18,7 @@ It is enabled by default.
// Any changes made in this file will be seen there as well.
// tag::apm-indices-settings[]
-Index defaults can be changed in Kibana. Open the main menu, then click *APM > Settings > Indices*.
+Index defaults can be changed in the APM app. Select **Settings** > **Indices**.
Index settings in the APM app take precedence over those set in `kibana.yml`.
[role="screenshot"]
diff --git a/docs/setup/settings.asciidoc b/docs/setup/settings.asciidoc
index ba333deeb1609..15abd0fa4ad96 100644
--- a/docs/setup/settings.asciidoc
+++ b/docs/setup/settings.asciidoc
@@ -284,6 +284,11 @@ the username and password that the {kib} server uses to perform maintenance
on the {kib} index at startup. {kib} users still need to authenticate with
{es}, which is proxied through the {kib} server.
+|[[elasticsearch-service-account-token]] `elasticsearch.serviceAccountToken:`
+ | beta[]. If your {es} is protected with basic authentication, this token provides the credentials
+that the {kib} server uses to perform maintenance on the {kib} index at startup. This setting
+is an alternative to `elasticsearch.username` and `elasticsearch.password`.
+
| `enterpriseSearch.host`
| The URL of your Enterprise Search instance
diff --git a/docs/user/dashboard/tsvb.asciidoc b/docs/user/dashboard/tsvb.asciidoc
index 89da3f7285924..11fe71b7639bb 100644
--- a/docs/user/dashboard/tsvb.asciidoc
+++ b/docs/user/dashboard/tsvb.asciidoc
@@ -148,6 +148,27 @@ The *Markdown* visualization supports Markdown with Handlebar (mustache) syntax
For answers to frequently asked *TSVB* question, review the following.
+[float]
+===== How do I create dashboard drilldowns for Top N and Table visualizations?
+
+You can create dashboard drilldowns that include the specified time range for *Top N* and *Table* visualizations.
+
+. Open the dashboard that you want to link to, then copy the URL.
+
+. Open the dashboard with the *Top N* and *Table* visualization panel, then click *Edit* in the toolbar.
+
+. Open the *Top N* or *Table* panel menu, then select *Edit visualization*.
+
+. Click *Panel options*.
+
+. In the *Item URL* field, enter the URL.
++
+For example `dashboards#/view/f193ca90-c9f4-11eb-b038-dd3270053a27`.
+
+. Click *Save and return*.
+
+. In the toolbar, click *Save as*, then make sure *Store time with dashboard* is deselected.
+
[float]
===== Why is my TSVB visualization missing data?
diff --git a/docs/user/management.asciidoc b/docs/user/management.asciidoc
index b86fa82c30381..2f9f1fe371dc3 100644
--- a/docs/user/management.asciidoc
+++ b/docs/user/management.asciidoc
@@ -82,9 +82,10 @@ connectors>> for triggering actions.
| Monitor the generation of reports—PDF, PNG, and CSV—and download reports that you previously generated.
A report can contain a dashboard, visualization, saved search, or Canvas workpad.
-| {ml-docs}/ml-jobs.html[Machine Learning Jobs]
-| View your {anomaly-jobs} and {dfanalytics-jobs}. Open the Single Metric
-Viewer or Anomaly Explorer to see your {ml} results.
+| Machine Learning Jobs
+| View your <> and
+<> jobs. Open the Single Metric
+Viewer or Anomaly Explorer to see your {anomaly-detect} results.
| <>
| Detect changes in your data by creating, managing, and monitoring alerts.
diff --git a/docs/user/ml/index.asciidoc b/docs/user/ml/index.asciidoc
index b3606b122d750..a05ff1eeec4a6 100644
--- a/docs/user/ml/index.asciidoc
+++ b/docs/user/ml/index.asciidoc
@@ -48,8 +48,9 @@ pane:
image::user/ml/images/ml-job-management.png[Job Management]
You can use the *Settings* pane to create and edit
-{ml-docs}/ml-calendars.html[calendars] and the filters that are used in
-{ml-docs}/ml-rules.html[custom rules]:
+{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-calendars[calendars] and the
+filters that are used in
+{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-rules[custom rules]:
[role="screenshot"]
image::user/ml/images/ml-settings.png[Calendar Management]
diff --git a/docs/user/production-considerations/task-manager-health-monitoring.asciidoc b/docs/user/production-considerations/task-manager-health-monitoring.asciidoc
index 8f2c8d106c77c..3321a9d0c02a1 100644
--- a/docs/user/production-considerations/task-manager-health-monitoring.asciidoc
+++ b/docs/user/production-considerations/task-manager-health-monitoring.asciidoc
@@ -57,8 +57,12 @@ xpack.task_manager.monitored_task_execution_thresholds:
The health API is best consumed by via the `/api/task_manager/_health` endpoint.
-Additionally, the metrics are logged in the {kib} `DEBUG` logger at a regular cadence.
-To enable Task Manager DEBUG logging in your {kib} instance, add the following to your `kibana.yml`:
+Additionally, there are two ways to consume these metrics:
+
+*Debug logging*
+
+The metrics are logged in the {kib} `DEBUG` logger at a regular cadence.
+To enable Task Manager debug logging in your {kib} instance, add the following to your `kibana.yml`:
[source,yml]
----
@@ -69,7 +73,22 @@ logging:
level: debug
----
-These stats are logged based the number of milliseconds set in your <> setting, which means it could add substantial noise to your logs. Only enable this level of logging temporarily.
+These stats are logged based on the number of milliseconds set in your <> setting, which could add substantial noise to your logs. Only enable this level of logging temporarily.
+
+*Automatic logging*
+
+By default, the health API runs at a regular cadence, and each time it runs, it attempts to self-evaluate its performance. If this self-evaluation yields a potential problem,
+a message will log to the {kib} server log. In addition, the health API will look at how long tasks have waited to start (from when they were scheduled to start). If this number exceeds a configurable threshold (<>), the same message as above will log to the {kib} server log.
+
+This message looks like:
+
+[source,log]
+----
+Detected potential performance issue with Task Manager. Set 'xpack.task_manager.monitored_stats_health_verbose_log.enabled: true' in your Kibana.yml to enable debug logging`
+----
+
+
+If this message appears, set <> to `true` in your `kibana.yml`. This will start logging the health metrics at either a `warn` or `error` log level, depending on the detected severity of the potential problem.
[float]
[[making-sense-of-task-manager-health-stats]]
diff --git a/package.json b/package.json
index 22eedde59c5e7..5cf72e2110982 100644
--- a/package.json
+++ b/package.json
@@ -99,7 +99,7 @@
"dependencies": {
"@elastic/apm-rum": "^5.8.0",
"@elastic/apm-rum-react": "^1.2.11",
- "@elastic/charts": "31.1.0",
+ "@elastic/charts": "32.0.0",
"@elastic/datemath": "link:bazel-bin/packages/elastic-datemath",
"@elastic/elasticsearch": "npm:@elastic/elasticsearch-canary@^8.0.0-canary.13",
"@elastic/ems-client": "7.14.0",
diff --git a/packages/kbn-securitysolution-list-utils/src/autocomplete_operators/types.ts b/packages/kbn-optimizer/src/__fixtures__/mock_repo/packages/kbn-ui-shared-deps/src/public_path_module_creator.ts
similarity index 56%
rename from packages/kbn-securitysolution-list-utils/src/autocomplete_operators/types.ts
rename to packages/kbn-optimizer/src/__fixtures__/mock_repo/packages/kbn-ui-shared-deps/src/public_path_module_creator.ts
index 1be21bb62a7fe..b03ee16d2f746 100644
--- a/packages/kbn-securitysolution-list-utils/src/autocomplete_operators/types.ts
+++ b/packages/kbn-optimizer/src/__fixtures__/mock_repo/packages/kbn-ui-shared-deps/src/public_path_module_creator.ts
@@ -6,14 +6,4 @@
* Side Public License, v 1.
*/
-import type {
- ListOperatorEnum as OperatorEnum,
- ListOperatorTypeEnum as OperatorTypeEnum,
-} from '@kbn/securitysolution-io-ts-list-types';
-
-export interface OperatorOption {
- message: string;
- value: string;
- operator: OperatorEnum;
- type: OperatorTypeEnum;
-}
+// stub
diff --git a/packages/kbn-optimizer/src/common/__snapshots__/parse_path.test.ts.snap b/packages/kbn-optimizer/src/common/__snapshots__/parse_path.test.ts.snap
index f537674c3fff7..2a30694afb826 100644
--- a/packages/kbn-optimizer/src/common/__snapshots__/parse_path.test.ts.snap
+++ b/packages/kbn-optimizer/src/common/__snapshots__/parse_path.test.ts.snap
@@ -1,7 +1,7 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`parseDirPath() parses / 1`] = `
-Object {
+ParsedPath {
"dirs": Array [],
"filename": undefined,
"query": undefined,
@@ -10,7 +10,7 @@ Object {
`;
exports[`parseDirPath() parses /foo 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
],
@@ -21,7 +21,7 @@ Object {
`;
exports[`parseDirPath() parses /foo/bar/baz 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -34,7 +34,7 @@ Object {
`;
exports[`parseDirPath() parses /foo/bar/baz/ 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -47,7 +47,7 @@ Object {
`;
exports[`parseDirPath() parses c:\\ 1`] = `
-Object {
+ParsedPath {
"dirs": Array [],
"filename": undefined,
"query": undefined,
@@ -56,7 +56,7 @@ Object {
`;
exports[`parseDirPath() parses c:\\foo 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
],
@@ -67,7 +67,7 @@ Object {
`;
exports[`parseDirPath() parses c:\\foo\\bar\\baz 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -80,7 +80,7 @@ Object {
`;
exports[`parseDirPath() parses c:\\foo\\bar\\baz\\ 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -93,7 +93,7 @@ Object {
`;
exports[`parseFilePath() parses /foo 1`] = `
-Object {
+ParsedPath {
"dirs": Array [],
"filename": "foo",
"query": undefined,
@@ -102,7 +102,7 @@ Object {
`;
exports[`parseFilePath() parses /foo/bar/baz 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -114,7 +114,7 @@ Object {
`;
exports[`parseFilePath() parses /foo/bar/baz.json 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -126,7 +126,7 @@ Object {
`;
exports[`parseFilePath() parses /foo/bar/baz.json?light 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -140,7 +140,7 @@ Object {
`;
exports[`parseFilePath() parses /foo/bar/baz.json?light=true&dark=false 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -155,7 +155,7 @@ Object {
`;
exports[`parseFilePath() parses c:/foo/bar/baz.json 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -167,7 +167,7 @@ Object {
`;
exports[`parseFilePath() parses c:\\foo 1`] = `
-Object {
+ParsedPath {
"dirs": Array [],
"filename": "foo",
"query": undefined,
@@ -176,7 +176,7 @@ Object {
`;
exports[`parseFilePath() parses c:\\foo\\bar\\baz 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -188,7 +188,7 @@ Object {
`;
exports[`parseFilePath() parses c:\\foo\\bar\\baz.json 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -200,7 +200,7 @@ Object {
`;
exports[`parseFilePath() parses c:\\foo\\bar\\baz.json?dark 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
@@ -214,7 +214,7 @@ Object {
`;
exports[`parseFilePath() parses c:\\foo\\bar\\baz.json?dark=true&light=false 1`] = `
-Object {
+ParsedPath {
"dirs": Array [
"foo",
"bar",
diff --git a/packages/kbn-optimizer/src/common/parse_path.ts b/packages/kbn-optimizer/src/common/parse_path.ts
index 7ea0042db25c9..da3744ba477bd 100644
--- a/packages/kbn-optimizer/src/common/parse_path.ts
+++ b/packages/kbn-optimizer/src/common/parse_path.ts
@@ -9,17 +9,61 @@
import normalizePath from 'normalize-path';
import Qs from 'querystring';
+class ParsedPath {
+ constructor(
+ public readonly root: string,
+ public readonly dirs: string[],
+ public readonly query?: Record,
+ public readonly filename?: string
+ ) {}
+
+ private indexOfDir(match: string | RegExp, fromIndex: number = 0) {
+ for (let i = fromIndex; i < this.dirs.length; i++) {
+ if (this.matchDir(i, match)) {
+ return i;
+ }
+ }
+
+ return -1;
+ }
+
+ private matchDir(i: number, match: string | RegExp) {
+ return typeof match === 'string' ? this.dirs[i] === match : match.test(this.dirs[i]);
+ }
+
+ matchDirs(...segments: Array) {
+ const [first, ...rest] = segments;
+ let fromIndex = 0;
+ while (true) {
+ // do the dirs include the first segment to match?
+ const startIndex = this.indexOfDir(first, fromIndex);
+ if (startIndex === -1) {
+ return;
+ }
+
+ // are all of the ...rest segments also matched at this point?
+ if (!rest.length || rest.every((seg, i) => this.matchDir(startIndex + 1 + i, seg))) {
+ return { startIndex, endIndex: startIndex + rest.length };
+ }
+
+ // no match, search again, this time looking at instances after the matched instance
+ fromIndex = startIndex + 1;
+ }
+ }
+}
+
/**
* Parse an absolute path, supporting normalized paths from webpack,
* into a list of directories and root
*/
export function parseDirPath(path: string) {
const filePath = parseFilePath(path);
- return {
- ...filePath,
- dirs: [...filePath.dirs, ...(filePath.filename ? [filePath.filename] : [])],
- filename: undefined,
- };
+ return new ParsedPath(
+ filePath.root,
+ [...filePath.dirs, ...(filePath.filename ? [filePath.filename] : [])],
+ filePath.query,
+ undefined
+ );
}
export function parseFilePath(path: string) {
@@ -32,10 +76,10 @@ export function parseFilePath(path: string) {
}
const [root, ...others] = normalized.split('/');
- return {
- root: root === '' ? '/' : root,
- dirs: others.slice(0, -1),
+ return new ParsedPath(
+ root === '' ? '/' : root,
+ others.slice(0, -1),
query,
- filename: others[others.length - 1] || undefined,
- };
+ others[others.length - 1] || undefined
+ );
}
diff --git a/packages/kbn-optimizer/src/integration_tests/basic_optimization.test.ts b/packages/kbn-optimizer/src/integration_tests/basic_optimization.test.ts
index 97a7f33be673d..48d36b706b831 100644
--- a/packages/kbn-optimizer/src/integration_tests/basic_optimization.test.ts
+++ b/packages/kbn-optimizer/src/integration_tests/basic_optimization.test.ts
@@ -15,7 +15,7 @@ import cpy from 'cpy';
import del from 'del';
import { tap, filter } from 'rxjs/operators';
import { REPO_ROOT } from '@kbn/utils';
-import { ToolingLog, createReplaceSerializer } from '@kbn/dev-utils';
+import { ToolingLog } from '@kbn/dev-utils';
import { runOptimizer, OptimizerConfig, OptimizerUpdate, logOptimizerState } from '../index';
import { allValuesFrom } from '../common';
@@ -29,8 +29,6 @@ expect.addSnapshotSerializer({
test: (value: any) => typeof value === 'string' && value.includes(REPO_ROOT),
});
-expect.addSnapshotSerializer(createReplaceSerializer(/\w+-fastbuild/, '-fastbuild'));
-
const log = new ToolingLog({
level: 'error',
writeTo: {
@@ -132,7 +130,7 @@ it('builds expected bundles, saves bundle counts to metadata', async () => {
expect(foo.cache.getModuleCount()).toBe(6);
expect(foo.cache.getReferencedFiles()).toMatchInlineSnapshot(`
Array [
- /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/bazel-out/-fastbuild/bin/packages/kbn-ui-shared-deps/target/public_path_module_creator.js,
+ /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/packages/kbn-ui-shared-deps/src/public_path_module_creator.ts,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/foo/kibana.json,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/foo/public/async_import.ts,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/foo/public/ext.ts,
@@ -155,7 +153,7 @@ it('builds expected bundles, saves bundle counts to metadata', async () => {
/node_modules/@kbn/optimizer/postcss.config.js,
/node_modules/css-loader/package.json,
/node_modules/style-loader/package.json,
- /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/bazel-out/-fastbuild/bin/packages/kbn-ui-shared-deps/target/public_path_module_creator.js,
+ /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/packages/kbn-ui-shared-deps/src/public_path_module_creator.ts,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/bar/kibana.json,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/bar/public/index.scss,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/bar/public/index.ts,
@@ -175,7 +173,7 @@ it('builds expected bundles, saves bundle counts to metadata', async () => {
expect(baz.cache.getReferencedFiles()).toMatchInlineSnapshot(`
Array [
- /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/bazel-out/-fastbuild/bin/packages/kbn-ui-shared-deps/target/public_path_module_creator.js,
+ /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/packages/kbn-ui-shared-deps/src/public_path_module_creator.ts,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/x-pack/baz/kibana.json,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/x-pack/baz/public/index.ts,
/packages/kbn-optimizer/src/worker/entry_point_creator.ts,
diff --git a/packages/kbn-optimizer/src/worker/populate_bundle_cache_plugin.ts b/packages/kbn-optimizer/src/worker/populate_bundle_cache_plugin.ts
index 8d890b31b639d..a3455d7ddf2b9 100644
--- a/packages/kbn-optimizer/src/worker/populate_bundle_cache_plugin.ts
+++ b/packages/kbn-optimizer/src/worker/populate_bundle_cache_plugin.ts
@@ -6,11 +6,11 @@
* Side Public License, v 1.
*/
-import webpack from 'webpack';
-
import Path from 'path';
import { inspect } from 'util';
+import webpack from 'webpack';
+
import { Bundle, WorkerConfig, ascending, parseFilePath } from '../common';
import { BundleRefModule } from './bundle_ref_module';
import {
@@ -21,6 +21,20 @@ import {
getModulePath,
} from './webpack_helpers';
+function tryToResolveRewrittenPath(from: string, toResolve: string) {
+ try {
+ return require.resolve(toResolve);
+ } catch (error) {
+ if (error.code === 'MODULE_NOT_FOUND') {
+ throw new Error(
+ `attempted to rewrite bazel-out path [${from}] to [${toResolve}] but couldn't find the rewrite target`
+ );
+ }
+
+ throw error;
+ }
+}
+
/**
* sass-loader creates about a 40% overhead on the overall optimizer runtime, and
* so this constant is used to indicate to assignBundlesToWorkers() that there is
@@ -57,17 +71,44 @@ export class PopulateBundleCachePlugin {
let path = getModulePath(module);
let parsedPath = parseFilePath(path);
- if (parsedPath.dirs.includes('bazel-out')) {
- const index = parsedPath.dirs.indexOf('bazel-out');
- path = Path.join(
- workerConfig.repoRoot,
- 'bazel-out',
- ...parsedPath.dirs.slice(index + 1),
- parsedPath.filename ?? ''
+ const bazelOut = parsedPath.matchDirs(
+ 'bazel-out',
+ /-fastbuild$/,
+ 'bin',
+ 'packages',
+ /.*/,
+ 'target'
+ );
+
+ // if the module is referenced from one of our packages and resolved to the `bazel-out` dir
+ // we should rewrite our reference to point to the source file so that we can track the
+ // modified time of that file rather than the built output which is rebuilt all the time
+ // without actually changing
+ if (bazelOut) {
+ const packageDir = parsedPath.dirs[bazelOut.endIndex - 1];
+ const subDirs = parsedPath.dirs.slice(bazelOut.endIndex + 1);
+ path = tryToResolveRewrittenPath(
+ path,
+ Path.join(
+ workerConfig.repoRoot,
+ 'packages',
+ packageDir,
+ 'src',
+ ...subDirs,
+ parsedPath.filename
+ ? Path.basename(parsedPath.filename, Path.extname(parsedPath.filename))
+ : ''
+ )
);
parsedPath = parseFilePath(path);
}
+ if (parsedPath.matchDirs('bazel-out')) {
+ throw new Error(
+ `a bazel-out dir is being referenced by module [${path}] and not getting rewritten to its source location`
+ );
+ }
+
if (!parsedPath.dirs.includes('node_modules')) {
referencedFiles.add(path);
diff --git a/packages/kbn-securitysolution-list-utils/src/autocomplete_operators/index.ts b/packages/kbn-securitysolution-list-utils/src/autocomplete_operators/index.ts
index 967cebc360f61..051c359dc4612 100644
--- a/packages/kbn-securitysolution-list-utils/src/autocomplete_operators/index.ts
+++ b/packages/kbn-securitysolution-list-utils/src/autocomplete_operators/index.ts
@@ -11,8 +11,7 @@ import {
ListOperatorEnum as OperatorEnum,
ListOperatorTypeEnum as OperatorTypeEnum,
} from '@kbn/securitysolution-io-ts-list-types';
-
-import { OperatorOption } from './types';
+import { OperatorOption } from '../types';
export const isOperator: OperatorOption = {
message: i18n.translate('lists.exceptions.isOperatorLabel', {
diff --git a/packages/kbn-securitysolution-list-utils/src/helpers/index.ts b/packages/kbn-securitysolution-list-utils/src/helpers/index.ts
index d208624b69fc5..38446b2a08ec0 100644
--- a/packages/kbn-securitysolution-list-utils/src/helpers/index.ts
+++ b/packages/kbn-securitysolution-list-utils/src/helpers/index.ts
@@ -43,7 +43,6 @@ import {
isOneOfOperator,
isOperator,
} from '../autocomplete_operators';
-import { OperatorOption } from '../autocomplete_operators/types';
import {
BuilderEntry,
@@ -52,6 +51,7 @@ import {
EmptyNestedEntry,
ExceptionsBuilderExceptionItem,
FormattedBuilderEntry,
+ OperatorOption,
} from '../types';
export const isEntryNested = (item: BuilderEntry): item is EntryNested => {
diff --git a/packages/kbn-securitysolution-list-utils/src/types/index.ts b/packages/kbn-securitysolution-list-utils/src/types/index.ts
index faf68ca157981..537ac06a49f34 100644
--- a/packages/kbn-securitysolution-list-utils/src/types/index.ts
+++ b/packages/kbn-securitysolution-list-utils/src/types/index.ts
@@ -23,7 +23,12 @@ import {
EXCEPTION_LIST_NAMESPACE_AGNOSTIC,
} from '@kbn/securitysolution-list-constants';
-import type { OperatorOption } from '../autocomplete_operators/types';
+export interface OperatorOption {
+ message: string;
+ value: string;
+ operator: OperatorEnum;
+ type: OperatorTypeEnum;
+}
/**
* @deprecated Use the one from core once it is in its own package which will be from:
diff --git a/packages/kbn-spec-to-console/README.md b/packages/kbn-spec-to-console/README.md
index 0328dec791320..a0e654713f61b 100644
--- a/packages/kbn-spec-to-console/README.md
+++ b/packages/kbn-spec-to-console/README.md
@@ -18,15 +18,10 @@ git pull --depth=1 origin master
### Usage
-You need to run the command twice: once for the **OSS** specs and once for the **X-Pack** specs
At the root of the Kibana repository, run the following commands:
```sh
-# OSS
yarn spec_to_console -g "/rest-api-spec/src/main/resources/rest-api-spec/api/*" -d "src/plugins/console/server/lib/spec_definitions/json/generated"
-
-# X-pack
-yarn spec_to_console -g "/x-pack/plugin/src/test/resources/rest-api-spec/api/*" -d "x-pack/plugins/console_extensions/server/lib/spec_definitions/json/generated"
```
### Information used in Console that is not available in the REST spec
diff --git a/packages/kbn-spec-to-console/lib/convert/params.js b/packages/kbn-spec-to-console/lib/convert/params.js
index e5365b4d7311e..1aa89be11c76d 100644
--- a/packages/kbn-spec-to-console/lib/convert/params.js
+++ b/packages/kbn-spec-to-console/lib/convert/params.js
@@ -37,6 +37,7 @@ module.exports = (params) => {
case 'string':
case 'number':
case 'number|string':
+ case 'boolean|long':
result[param] = defaultValue || '';
break;
case 'list':
diff --git a/src/cli/serve/serve.js b/src/cli/serve/serve.js
index ad83965efde33..be949350f7229 100644
--- a/src/cli/serve/serve.js
+++ b/src/cli/serve/serve.js
@@ -68,12 +68,14 @@ function applyConfigOverrides(rawConfig, opts, extraCliOptions) {
delete extraCliOptions.env;
if (opts.dev) {
- if (!has('elasticsearch.username')) {
- set('elasticsearch.username', 'kibana_system');
- }
+ if (!has('elasticsearch.serviceAccountToken')) {
+ if (!has('elasticsearch.username')) {
+ set('elasticsearch.username', 'kibana_system');
+ }
- if (!has('elasticsearch.password')) {
- set('elasticsearch.password', 'changeme');
+ if (!has('elasticsearch.password')) {
+ set('elasticsearch.password', 'changeme');
+ }
}
if (opts.ssl) {
diff --git a/src/core/public/chrome/ui/header/header_help_menu.tsx b/src/core/public/chrome/ui/header/header_help_menu.tsx
index c6a09c1177a5e..cbf89bba2ca44 100644
--- a/src/core/public/chrome/ui/header/header_help_menu.tsx
+++ b/src/core/public/chrome/ui/header/header_help_menu.tsx
@@ -211,7 +211,7 @@ export class HeaderHelpMenu extends Component {
return (
-
+