diff --git a/.i18nrc.json b/.i18nrc.json
index d85c24dce68af..6369637479478 100644
--- a/.i18nrc.json
+++ b/.i18nrc.json
@@ -25,6 +25,7 @@
"expressionRepeatImage": "src/plugins/expression_repeat_image",
"expressionRevealImage": "src/plugins/expression_reveal_image",
"expressionShape": "src/plugins/expression_shape",
+ "expressionTagcloud": "src/plugins/chart_expressions/expression_tagcloud",
"inputControl": "src/plugins/input_control_vis",
"inspector": "src/plugins/inspector",
"inspectorViews": "src/legacy/core_plugins/inspector_views",
@@ -65,9 +66,9 @@
"visTypeTagCloud": "src/plugins/vis_type_tagcloud",
"visTypeTimeseries": "src/plugins/vis_type_timeseries",
"visTypeVega": "src/plugins/vis_type_vega",
- "visTypeVislib": "src/plugins/vis_type_vislib",
- "visTypeXy": "src/plugins/vis_type_xy",
- "visTypePie": "src/plugins/vis_type_pie",
+ "visTypeVislib": "src/plugins/vis_types/vislib",
+ "visTypeXy": "src/plugins/vis_types/xy",
+ "visTypePie": "src/plugins/vis_types/pie",
"visualizations": "src/plugins/visualizations",
"visualize": "src/plugins/visualize",
"apmOss": "src/plugins/apm_oss",
diff --git a/docs/developer/plugin-list.asciidoc b/docs/developer/plugin-list.asciidoc
index b665206b75576..393e5efed4516 100644
--- a/docs/developer/plugin-list.asciidoc
+++ b/docs/developer/plugin-list.asciidoc
@@ -113,6 +113,10 @@ for use in their own application.
|Expression Shape plugin adds a shape function to the expression plugin and an associated renderer. The renderer will display the given shape with selected decorations.
+|{kib-repo}blob/{branch}/src/plugins/chart_expressions/expression_tagcloud/README.md[expressionTagcloud]
+|Expression Tagcloud plugin adds a tagcloud renderer and function to the expression plugin. The renderer will display the Wordcloud chart.
+
+
|{kib-repo}blob/{branch}/src/plugins/field_formats/README.md[fieldFormats]
|Index pattern fields formatters
@@ -294,7 +298,7 @@ The plugin exposes the static DefaultEditorController class to consume.
|WARNING: Missing README.
-|{kib-repo}blob/{branch}/src/plugins/vis_type_pie[visTypePie]
+|{kib-repo}blob/{branch}/src/plugins/vis_types/pie[visTypePie]
|WARNING: Missing README.
@@ -318,11 +322,11 @@ The plugin exposes the static DefaultEditorController class to consume.
|WARNING: Missing README.
-|{kib-repo}blob/{branch}/src/plugins/vis_type_vislib[visTypeVislib]
+|{kib-repo}blob/{branch}/src/plugins/vis_types/vislib[visTypeVislib]
|WARNING: Missing README.
-|{kib-repo}blob/{branch}/src/plugins/vis_type_xy[visTypeXy]
+|{kib-repo}blob/{branch}/src/plugins/vis_types/xy[visTypeXy]
|WARNING: Missing README.
diff --git a/docs/development/core/public/kibana-plugin-core-public.chromebrand.logo.md b/docs/development/core/public/kibana-plugin-core-public.chromebrand.logo.md
deleted file mode 100644
index 561d9c50008b8..0000000000000
--- a/docs/development/core/public/kibana-plugin-core-public.chromebrand.logo.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-core-public](./kibana-plugin-core-public.md) > [ChromeBrand](./kibana-plugin-core-public.chromebrand.md) > [logo](./kibana-plugin-core-public.chromebrand.logo.md)
-
-## ChromeBrand.logo property
-
-Signature:
-
-```typescript
-logo?: string;
-```
diff --git a/docs/development/core/public/kibana-plugin-core-public.chromebrand.md b/docs/development/core/public/kibana-plugin-core-public.chromebrand.md
deleted file mode 100644
index 21cdf6c3dee9b..0000000000000
--- a/docs/development/core/public/kibana-plugin-core-public.chromebrand.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-core-public](./kibana-plugin-core-public.md) > [ChromeBrand](./kibana-plugin-core-public.chromebrand.md)
-
-## ChromeBrand interface
-
-
-Signature:
-
-```typescript
-export interface ChromeBrand
-```
-
-## Properties
-
-| Property | Type | Description |
-| --- | --- | --- |
-| [logo](./kibana-plugin-core-public.chromebrand.logo.md) | string | |
-| [smallLogo](./kibana-plugin-core-public.chromebrand.smalllogo.md) | string | |
-
diff --git a/docs/development/core/public/kibana-plugin-core-public.chromebrand.smalllogo.md b/docs/development/core/public/kibana-plugin-core-public.chromebrand.smalllogo.md
deleted file mode 100644
index 5b21e806540be..0000000000000
--- a/docs/development/core/public/kibana-plugin-core-public.chromebrand.smalllogo.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-core-public](./kibana-plugin-core-public.md) > [ChromeBrand](./kibana-plugin-core-public.chromebrand.md) > [smallLogo](./kibana-plugin-core-public.chromebrand.smalllogo.md)
-
-## ChromeBrand.smallLogo property
-
-Signature:
-
-```typescript
-smallLogo?: string;
-```
diff --git a/docs/development/core/public/kibana-plugin-core-public.chromestart.addapplicationclass.md b/docs/development/core/public/kibana-plugin-core-public.chromestart.addapplicationclass.md
deleted file mode 100644
index 67e86863ad3c8..0000000000000
--- a/docs/development/core/public/kibana-plugin-core-public.chromestart.addapplicationclass.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-core-public](./kibana-plugin-core-public.md) > [ChromeStart](./kibana-plugin-core-public.chromestart.md) > [addApplicationClass](./kibana-plugin-core-public.chromestart.addapplicationclass.md)
-
-## ChromeStart.addApplicationClass() method
-
-Add a className that should be set on the application container.
-
-Signature:
-
-```typescript
-addApplicationClass(className: string): void;
-```
-
-## Parameters
-
-| Parameter | Type | Description |
-| --- | --- | --- |
-| className | string | |
-
-Returns:
-
-`void`
-
diff --git a/docs/development/core/public/kibana-plugin-core-public.chromestart.getapplicationclasses_.md b/docs/development/core/public/kibana-plugin-core-public.chromestart.getapplicationclasses_.md
deleted file mode 100644
index c932d8b7f0a40..0000000000000
--- a/docs/development/core/public/kibana-plugin-core-public.chromestart.getapplicationclasses_.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-core-public](./kibana-plugin-core-public.md) > [ChromeStart](./kibana-plugin-core-public.chromestart.md) > [getApplicationClasses$](./kibana-plugin-core-public.chromestart.getapplicationclasses_.md)
-
-## ChromeStart.getApplicationClasses$() method
-
-Get the current set of classNames that will be set on the application container.
-
-Signature:
-
-```typescript
-getApplicationClasses$(): Observable;
-```
-Returns:
-
-`Observable`
-
diff --git a/docs/development/core/public/kibana-plugin-core-public.chromestart.getbrand_.md b/docs/development/core/public/kibana-plugin-core-public.chromestart.getbrand_.md
deleted file mode 100644
index fa42defd6339a..0000000000000
--- a/docs/development/core/public/kibana-plugin-core-public.chromestart.getbrand_.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-core-public](./kibana-plugin-core-public.md) > [ChromeStart](./kibana-plugin-core-public.chromestart.md) > [getBrand$](./kibana-plugin-core-public.chromestart.getbrand_.md)
-
-## ChromeStart.getBrand$() method
-
-Get an observable of the current brand information.
-
-Signature:
-
-```typescript
-getBrand$(): Observable;
-```
-Returns:
-
-`Observable`
-
diff --git a/docs/development/core/public/kibana-plugin-core-public.chromestart.md b/docs/development/core/public/kibana-plugin-core-public.chromestart.md
index 2d465745c436b..7285b4a00a0ec 100644
--- a/docs/development/core/public/kibana-plugin-core-public.chromestart.md
+++ b/docs/development/core/public/kibana-plugin-core-public.chromestart.md
@@ -50,20 +50,14 @@ core.chrome.setHelpExtension(elem => {
| Method | Description |
| --- | --- |
-| [addApplicationClass(className)](./kibana-plugin-core-public.chromestart.addapplicationclass.md) | Add a className that should be set on the application container. |
-| [getApplicationClasses$()](./kibana-plugin-core-public.chromestart.getapplicationclasses_.md) | Get the current set of classNames that will be set on the application container. |
| [getBadge$()](./kibana-plugin-core-public.chromestart.getbadge_.md) | Get an observable of the current badge |
-| [getBrand$()](./kibana-plugin-core-public.chromestart.getbrand_.md) | Get an observable of the current brand information. |
| [getBreadcrumbs$()](./kibana-plugin-core-public.chromestart.getbreadcrumbs_.md) | Get an observable of the current list of breadcrumbs |
| [getBreadcrumbsAppendExtension$()](./kibana-plugin-core-public.chromestart.getbreadcrumbsappendextension_.md) | Get an observable of the current extension appended to breadcrumbs |
| [getCustomNavLink$()](./kibana-plugin-core-public.chromestart.getcustomnavlink_.md) | Get an observable of the current custom nav link |
| [getHelpExtension$()](./kibana-plugin-core-public.chromestart.gethelpextension_.md) | Get an observable of the current custom help content |
| [getIsNavDrawerLocked$()](./kibana-plugin-core-public.chromestart.getisnavdrawerlocked_.md) | Get an observable of the current locked state of the nav drawer. |
| [getIsVisible$()](./kibana-plugin-core-public.chromestart.getisvisible_.md) | Get an observable of the current visibility state of the chrome. |
-| [removeApplicationClass(className)](./kibana-plugin-core-public.chromestart.removeapplicationclass.md) | Remove a className added with addApplicationClass(). If className is unknown it is ignored. |
-| [setAppTitle(appTitle)](./kibana-plugin-core-public.chromestart.setapptitle.md) | Sets the current app's title |
| [setBadge(badge)](./kibana-plugin-core-public.chromestart.setbadge.md) | Override the current badge |
-| [setBrand(brand)](./kibana-plugin-core-public.chromestart.setbrand.md) | Set the brand configuration. |
| [setBreadcrumbs(newBreadcrumbs)](./kibana-plugin-core-public.chromestart.setbreadcrumbs.md) | Override the current set of breadcrumbs |
| [setBreadcrumbsAppendExtension(breadcrumbsAppendExtension)](./kibana-plugin-core-public.chromestart.setbreadcrumbsappendextension.md) | Mount an element next to the last breadcrumb |
| [setCustomNavLink(newCustomNavLink)](./kibana-plugin-core-public.chromestart.setcustomnavlink.md) | Override the current set of custom nav link |
diff --git a/docs/development/core/public/kibana-plugin-core-public.chromestart.removeapplicationclass.md b/docs/development/core/public/kibana-plugin-core-public.chromestart.removeapplicationclass.md
deleted file mode 100644
index 5bdeec635ed44..0000000000000
--- a/docs/development/core/public/kibana-plugin-core-public.chromestart.removeapplicationclass.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-core-public](./kibana-plugin-core-public.md) > [ChromeStart](./kibana-plugin-core-public.chromestart.md) > [removeApplicationClass](./kibana-plugin-core-public.chromestart.removeapplicationclass.md)
-
-## ChromeStart.removeApplicationClass() method
-
-Remove a className added with `addApplicationClass()`. If className is unknown it is ignored.
-
-Signature:
-
-```typescript
-removeApplicationClass(className: string): void;
-```
-
-## Parameters
-
-| Parameter | Type | Description |
-| --- | --- | --- |
-| className | string | |
-
-Returns:
-
-`void`
-
diff --git a/docs/development/core/public/kibana-plugin-core-public.chromestart.setapptitle.md b/docs/development/core/public/kibana-plugin-core-public.chromestart.setapptitle.md
deleted file mode 100644
index f0e2db30f1891..0000000000000
--- a/docs/development/core/public/kibana-plugin-core-public.chromestart.setapptitle.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-core-public](./kibana-plugin-core-public.md) > [ChromeStart](./kibana-plugin-core-public.chromestart.md) > [setAppTitle](./kibana-plugin-core-public.chromestart.setapptitle.md)
-
-## ChromeStart.setAppTitle() method
-
-Sets the current app's title
-
-Signature:
-
-```typescript
-setAppTitle(appTitle: string): void;
-```
-
-## Parameters
-
-| Parameter | Type | Description |
-| --- | --- | --- |
-| appTitle | string | |
-
-Returns:
-
-`void`
-
diff --git a/docs/development/core/public/kibana-plugin-core-public.chromestart.setbrand.md b/docs/development/core/public/kibana-plugin-core-public.chromestart.setbrand.md
deleted file mode 100644
index daaa510483ae7..0000000000000
--- a/docs/development/core/public/kibana-plugin-core-public.chromestart.setbrand.md
+++ /dev/null
@@ -1,39 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-core-public](./kibana-plugin-core-public.md) > [ChromeStart](./kibana-plugin-core-public.chromestart.md) > [setBrand](./kibana-plugin-core-public.chromestart.setbrand.md)
-
-## ChromeStart.setBrand() method
-
-Set the brand configuration.
-
-Signature:
-
-```typescript
-setBrand(brand: ChromeBrand): void;
-```
-
-## Parameters
-
-| Parameter | Type | Description |
-| --- | --- | --- |
-| brand | ChromeBrand | |
-
-Returns:
-
-`void`
-
-## Remarks
-
-Normally the `logo` property will be rendered as the CSS background for the home link in the chrome navigation, but when the page is rendered in a small window the `smallLogo` will be used and rendered at about 45px wide.
-
-## Example
-
-
-```js
-chrome.setBrand({
- logo: 'url(/plugins/app/logo.png) center no-repeat'
- smallLogo: 'url(/plugins/app/logo-small.png) center no-repeat'
-})
-
-```
-
diff --git a/docs/development/core/public/kibana-plugin-core-public.md b/docs/development/core/public/kibana-plugin-core-public.md
index e984fbb675e6d..59735b053adbc 100644
--- a/docs/development/core/public/kibana-plugin-core-public.md
+++ b/docs/development/core/public/kibana-plugin-core-public.md
@@ -42,7 +42,6 @@ The plugin integrates with the core system via lifecycle events: `setup`
| [AsyncPlugin](./kibana-plugin-core-public.asyncplugin.md) | A plugin with asynchronous lifecycle methods. |
| [Capabilities](./kibana-plugin-core-public.capabilities.md) | The read-only set of capabilities available for the current UI session. Capabilities are simple key-value pairs of (string, boolean), where the string denotes the capability ID, and the boolean is a flag indicating if the capability is enabled or disabled. |
| [ChromeBadge](./kibana-plugin-core-public.chromebadge.md) | |
-| [ChromeBrand](./kibana-plugin-core-public.chromebrand.md) | |
| [ChromeDocTitle](./kibana-plugin-core-public.chromedoctitle.md) | APIs for accessing and updating the document title. |
| [ChromeHelpExtension](./kibana-plugin-core-public.chromehelpextension.md) | |
| [ChromeHelpExtensionMenuCustomLink](./kibana-plugin-core-public.chromehelpextensionmenucustomlink.md) | |
diff --git a/docs/development/core/server/kibana-plugin-core-server.elasticsearchclientconfig.md b/docs/development/core/server/kibana-plugin-core-server.elasticsearchclientconfig.md
index 208e0e0175d71..0084b0b50c869 100644
--- a/docs/development/core/server/kibana-plugin-core-server.elasticsearchclientconfig.md
+++ b/docs/development/core/server/kibana-plugin-core-server.elasticsearchclientconfig.md
@@ -14,5 +14,6 @@ export declare type ElasticsearchClientConfig = Pick;
keepAlive?: boolean;
+ caFingerprint?: ClientOptions['caFingerprint'];
};
```
diff --git a/docs/development/core/server/kibana-plugin-core-server.httpservicepreboot.getserverinfo.md b/docs/development/core/server/kibana-plugin-core-server.httpservicepreboot.getserverinfo.md
new file mode 100644
index 0000000000000..0c9636b8eb634
--- /dev/null
+++ b/docs/development/core/server/kibana-plugin-core-server.httpservicepreboot.getserverinfo.md
@@ -0,0 +1,13 @@
+
+
+[Home](./index.md) > [kibana-plugin-core-server](./kibana-plugin-core-server.md) > [HttpServicePreboot](./kibana-plugin-core-server.httpservicepreboot.md) > [getServerInfo](./kibana-plugin-core-server.httpservicepreboot.getserverinfo.md)
+
+## HttpServicePreboot.getServerInfo property
+
+Provides common [information](./kibana-plugin-core-server.httpserverinfo.md) about the running preboot http server.
+
+Signature:
+
+```typescript
+getServerInfo: () => HttpServerInfo;
+```
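+
+A minimal usage sketch (a hypothetical helper; `HttpServicePreboot` and `Logger` come from core):
+
+```typescript
+function logPrebootServerInfo(http: HttpServicePreboot, logger: Logger) {
+  // HttpServerInfo exposes the name, hostname, port, and protocol of the running server.
+  const { protocol, hostname, port } = http.getServerInfo();
+  logger.info(`Preboot HTTP server running at ${protocol}://${hostname}:${port}`);
+}
+```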
diff --git a/docs/development/core/server/kibana-plugin-core-server.httpservicepreboot.md b/docs/development/core/server/kibana-plugin-core-server.httpservicepreboot.md
index b4adf454a480f..ab0fc365fc651 100644
--- a/docs/development/core/server/kibana-plugin-core-server.httpservicepreboot.md
+++ b/docs/development/core/server/kibana-plugin-core-server.httpservicepreboot.md
@@ -73,6 +73,7 @@ httpPreboot.registerRoutes('my-plugin', (router) => {
| Property | Type | Description |
| --- | --- | --- |
| [basePath](./kibana-plugin-core-server.httpservicepreboot.basepath.md) | IBasePath | Access or manipulate the Kibana base path See [IBasePath](./kibana-plugin-core-server.ibasepath.md). |
+| [getServerInfo](./kibana-plugin-core-server.httpservicepreboot.getserverinfo.md) | () => HttpServerInfo | Provides common [information](./kibana-plugin-core-server.httpserverinfo.md) about the running preboot http server. |
## Methods
diff --git a/docs/development/core/server/kibana-plugin-core-server.savedobjectsopenpointintimeoptions.md b/docs/development/core/server/kibana-plugin-core-server.savedobjectsopenpointintimeoptions.md
index 46516be2329e9..fc825e3bf2937 100644
--- a/docs/development/core/server/kibana-plugin-core-server.savedobjectsopenpointintimeoptions.md
+++ b/docs/development/core/server/kibana-plugin-core-server.savedobjectsopenpointintimeoptions.md
@@ -8,7 +8,7 @@
Signature:
```typescript
-export interface SavedObjectsOpenPointInTimeOptions extends SavedObjectsBaseOptions
+export interface SavedObjectsOpenPointInTimeOptions
```
## Properties
@@ -16,5 +16,6 @@ export interface SavedObjectsOpenPointInTimeOptions extends SavedObjectsBaseOpti
| Property | Type | Description |
| --- | --- | --- |
| [keepAlive](./kibana-plugin-core-server.savedobjectsopenpointintimeoptions.keepalive.md) | string | Optionally specify how long ES should keep the PIT alive until the next request. Defaults to 5m. |
+| [namespaces](./kibana-plugin-core-server.savedobjectsopenpointintimeoptions.namespaces.md) | string[] | An optional list of namespaces to be used when opening the PIT. When the spaces plugin is enabled: - this will default to the user's current space (as determined by the URL) - if specified, the user's current space will be ignored - ['*'] will search across all available spaces |
| [preference](./kibana-plugin-core-server.savedobjectsopenpointintimeoptions.preference.md) | string | An optional ES preference value to be used for the query. |
diff --git a/docs/development/core/server/kibana-plugin-core-server.savedobjectsopenpointintimeoptions.namespaces.md b/docs/development/core/server/kibana-plugin-core-server.savedobjectsopenpointintimeoptions.namespaces.md
new file mode 100644
index 0000000000000..06fb7519d52c2
--- /dev/null
+++ b/docs/development/core/server/kibana-plugin-core-server.savedobjectsopenpointintimeoptions.namespaces.md
@@ -0,0 +1,15 @@
+
+
+[Home](./index.md) > [kibana-plugin-core-server](./kibana-plugin-core-server.md) > [SavedObjectsOpenPointInTimeOptions](./kibana-plugin-core-server.savedobjectsopenpointintimeoptions.md) > [namespaces](./kibana-plugin-core-server.savedobjectsopenpointintimeoptions.namespaces.md)
+
+## SavedObjectsOpenPointInTimeOptions.namespaces property
+
+An optional list of namespaces to be used when opening the PIT.
+
+When the spaces plugin is enabled:
+
+- this will default to the user's current space (as determined by the URL)
+- if specified, the user's current space will be ignored
+- `['*']` will search across all available spaces
+
+Signature:
+
+```typescript
+namespaces?: string[];
+```
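+
+A minimal usage sketch (the type and space names are hypothetical; assumes a scoped saved objects client is in scope):
+
+```typescript
+// Open a PIT over two explicit spaces; pass ['*'] instead to search all available spaces.
+const { id } = await savedObjectsClient.openPointInTimeForType('dashboard', {
+  keepAlive: '5m',
+  namespaces: ['default', 'marketing'],
+});
+```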
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.ikibanasearchresponse.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.ikibanasearchresponse.md
index c7046902dac72..73261cd49d6d2 100644
--- a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.ikibanasearchresponse.md
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.ikibanasearchresponse.md
@@ -21,4 +21,5 @@ export interface IKibanaSearchResponse
| [loaded](./kibana-plugin-plugins-data-public.ikibanasearchresponse.loaded.md) | number | If relevant to the search strategy, return a loaded number that represents how progress is indicated. |
| [rawResponse](./kibana-plugin-plugins-data-public.ikibanasearchresponse.rawresponse.md) | RawResponse | The raw response returned by the internal search method (usually the raw ES response) |
| [total](./kibana-plugin-plugins-data-public.ikibanasearchresponse.total.md) | number | If relevant to the search strategy, return a total number that represents how progress is indicated. |
+| [warning](./kibana-plugin-plugins-data-public.ikibanasearchresponse.warning.md) | string | Optional warnings that should be surfaced to the end user |
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.ikibanasearchresponse.warning.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.ikibanasearchresponse.warning.md
new file mode 100644
index 0000000000000..cc0b8e2bea56e
--- /dev/null
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.ikibanasearchresponse.warning.md
@@ -0,0 +1,13 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [IKibanaSearchResponse](./kibana-plugin-plugins-data-public.ikibanasearchresponse.md) > [warning](./kibana-plugin-plugins-data-public.ikibanasearchresponse.warning.md)
+
+## IKibanaSearchResponse.warning property
+
+Optional warnings that should be surfaced to the end user
+
+Signature:
+
+```typescript
+warning?: string;
+```
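+
+A minimal handling sketch (mirrors the search examples plugin; assumes a search subscription and the core `notifications` service):
+
+```typescript
+if (isCompleteResponse(res) && res.warning) {
+  // Surface the warning to the end user instead of silently dropping it.
+  notifications.toasts.addWarning({ title: 'Warning', text: res.warning });
+}
+```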
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatternsservice.hasuserindexpattern.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatternsservice.hasuserindexpattern.md
new file mode 100644
index 0000000000000..31d1b9b9c16a9
--- /dev/null
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatternsservice.hasuserindexpattern.md
@@ -0,0 +1,17 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [IndexPatternsService](./kibana-plugin-plugins-data-public.indexpatternsservice.md) > [hasUserIndexPattern](./kibana-plugin-plugins-data-public.indexpatternsservice.hasuserindexpattern.md)
+
+## IndexPatternsService.hasUserIndexPattern() method
+
+Checks if the current user has a user-created index pattern, ignoring Fleet's server default index patterns.
+
+Signature:
+
+```typescript
+hasUserIndexPattern(): Promise;
+```
+Returns:
+
+`Promise`
+
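+A minimal usage sketch (the prompt helper is hypothetical; assumes an `IndexPatternsService` instance):
+
+```typescript
+// Only prompt for index pattern creation when the user has none of their own;
+// Fleet's server default index patterns are ignored by this check.
+if (!(await indexPatterns.hasUserIndexPattern())) {
+  showCreateIndexPatternPrompt(); // hypothetical UI helper
+}
+```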
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatternsservice.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatternsservice.md
index 30e7a6be143e9..1af365d96a254 100644
--- a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatternsservice.md
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatternsservice.md
@@ -45,6 +45,7 @@ export declare class IndexPatternsService
| [createAndSave(spec, override, skipFetchFields)](./kibana-plugin-plugins-data-public.indexpatternsservice.createandsave.md) | | Create a new index pattern and save it right away |
| [createSavedObject(indexPattern, override)](./kibana-plugin-plugins-data-public.indexpatternsservice.createsavedobject.md) | | Save a new index pattern |
| [delete(indexPatternId)](./kibana-plugin-plugins-data-public.indexpatternsservice.delete.md) | | Deletes an index pattern from .kibana index |
+| [hasUserIndexPattern()](./kibana-plugin-plugins-data-public.indexpatternsservice.hasuserindexpattern.md) | | Checks if the current user has a user-created index pattern, ignoring Fleet's server default index patterns |
| [migrate(indexPattern, newTitle)](./kibana-plugin-plugins-data-public.indexpatternsservice.migrate.md) | | |
| [updateSavedObject(indexPattern, saveAttempts, ignoreErrors)](./kibana-plugin-plugins-data-public.indexpatternsservice.updatesavedobject.md) | | Save existing index pattern. Will attempt to merge differences if there are conflicts |
diff --git a/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.indexpatternsservice.hasuserindexpattern.md b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.indexpatternsservice.hasuserindexpattern.md
new file mode 100644
index 0000000000000..49f365c106040
--- /dev/null
+++ b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.indexpatternsservice.hasuserindexpattern.md
@@ -0,0 +1,17 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-server](./kibana-plugin-plugins-data-server.md) > [IndexPatternsService](./kibana-plugin-plugins-data-server.indexpatternsservice.md) > [hasUserIndexPattern](./kibana-plugin-plugins-data-server.indexpatternsservice.hasuserindexpattern.md)
+
+## IndexPatternsService.hasUserIndexPattern() method
+
+Checks if the current user has a user-created index pattern, ignoring Fleet's server default index patterns.
+
+Signature:
+
+```typescript
+hasUserIndexPattern(): Promise;
+```
+Returns:
+
+`Promise`
+
diff --git a/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.indexpatternsservice.md b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.indexpatternsservice.md
index b42325b578f6e..2e71c1f7c4f93 100644
--- a/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.indexpatternsservice.md
+++ b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.indexpatternsservice.md
@@ -45,6 +45,7 @@ export declare class IndexPatternsService
| [createAndSave(spec, override, skipFetchFields)](./kibana-plugin-plugins-data-server.indexpatternsservice.createandsave.md) | | Create a new index pattern and save it right away |
| [createSavedObject(indexPattern, override)](./kibana-plugin-plugins-data-server.indexpatternsservice.createsavedobject.md) | | Save a new index pattern |
| [delete(indexPatternId)](./kibana-plugin-plugins-data-server.indexpatternsservice.delete.md) | | Deletes an index pattern from .kibana index |
+| [hasUserIndexPattern()](./kibana-plugin-plugins-data-server.indexpatternsservice.hasuserindexpattern.md) | | Checks if the current user has a user-created index pattern, ignoring Fleet's server default index patterns |
| [migrate(indexPattern, newTitle)](./kibana-plugin-plugins-data-server.indexpatternsservice.migrate.md) | | |
| [updateSavedObject(indexPattern, saveAttempts, ignoreErrors)](./kibana-plugin-plugins-data-server.indexpatternsservice.updatesavedobject.md) | | Save existing index pattern. Will attempt to merge differences if there are conflicts |
diff --git a/docs/settings/alert-action-settings.asciidoc b/docs/settings/alert-action-settings.asciidoc
index 0f7ad56fe5e7a..2086a0490d052 100644
--- a/docs/settings/alert-action-settings.asciidoc
+++ b/docs/settings/alert-action-settings.asciidoc
@@ -13,59 +13,48 @@ Alerts and actions are enabled by default in {kib}, but require you configure th
You can configure the following settings in the `kibana.yml` file.
-
[float]
[[general-alert-action-settings]]
==== General settings
-[cols="2*<"]
-|===
-
-| `xpack.encryptedSavedObjects`
-`.encryptionKey`
- | A string of 32 or more characters used to encrypt sensitive properties on alerting rules and actions before they're stored in {es}. Third party credentials — such as the username and password used to connect to an SMTP service — are an example of encrypted properties. +
- +
- {kib} offers a <> to help generate this encryption key. +
- +
- If not set, {kib} will generate a random key on startup, but all alerting and action functions will be blocked. Generated keys are not allowed for alerting and actions because when a new key is generated on restart, existing encrypted data becomes inaccessible. For the same reason, alerting and actions in high-availability deployments of {kib} will behave unexpectedly if the key isn't the same on all instances of {kib}. +
- +
- Although the key can be specified in clear text in `kibana.yml`, it's recommended to store this key securely in the <>.
- Be sure to back up the encryption key value somewhere safe, as your alerting rules and actions will cease to function due to decryption failures should you lose it. If you want to rotate the encryption key, be sure to follow the instructions on <>.
-
-|===
+`xpack.encryptedSavedObjects.encryptionKey`::
+A string of 32 or more characters used to encrypt sensitive properties on alerting rules and actions before they're stored in {es}. Third party credentials — such as the username and password used to connect to an SMTP service — are an example of encrypted properties.
++
+{kib} offers a <> to help generate this encryption key.
++
+If not set, {kib} will generate a random key on startup, but all alerting and action functions will be blocked. Generated keys are not allowed for alerting and actions because when a new key is generated on restart, existing encrypted data becomes inaccessible. For the same reason, alerting and actions in high-availability deployments of {kib} will behave unexpectedly if the key isn't the same on all instances of {kib}.
++
+Although the key can be specified in clear text in `kibana.yml`, it's recommended to store this key securely in the <>.
+Be sure to back up the encryption key value somewhere safe, as your alerting rules and actions will cease to function due to decryption failures should you lose it. If you want to rotate the encryption key, be sure to follow the instructions on <>.
[float]
[[action-settings]]
==== Action settings
-[cols="2*<"]
-|===
-| `xpack.actions.enabled`
- | Deprecated. This will be removed in 8.0. Feature toggle that enables Actions in {kib}.
- If `false`, all features dependent on Actions are disabled, including the *Observability* and *Security* apps. Default: `true`.
-
-| `xpack.actions.allowedHosts` {ess-icon}
- | A list of hostnames that {kib} is allowed to connect to when built-in actions are triggered. It defaults to `[*]`, allowing any host, but keep in mind the potential for SSRF attacks when hosts are not explicitly added to the allowed hosts. An empty list `[]` can be used to block built-in actions from making any external connections. +
- +
- Note that hosts associated with built-in actions, such as Slack and PagerDuty, are not automatically added to allowed hosts. If you are not using the default `[*]` setting, you must ensure that the corresponding endpoints are added to the allowed hosts as well.
-
-| `xpack.actions.customHostSettings` {ess-icon}
- | A list of custom host settings to override existing global settings.
- Default: an empty list. +
- +
- Each entry in the list must have a `url` property, to associate a connection
- type (mail or https), hostname and port with the remaining options in the
- entry.
- +
- In the following example, two custom host settings
- are defined. The first provides a custom host setting for mail server
- `mail.example.com` using port 465 that supplies server certificate authorization
- data from both a file and inline, and requires TLS for the
- connection. The second provides a custom host setting for https server
- `webhook.example.com` which turns off server certificate authorization.
-
-|===
-
+`xpack.actions.enabled`::
+Feature toggle that enables Actions in {kib}.
+If `false`, all features dependent on Actions are disabled, including the *Observability* and *Security* apps. Default: `true`.
+
+`xpack.actions.allowedHosts` {ess-icon}::
+A list of hostnames that {kib} is allowed to connect to when built-in actions are triggered. It defaults to `[*]`, allowing any host, but keep in mind the potential for SSRF attacks when hosts are not explicitly added to the allowed hosts. An empty list `[]` can be used to block built-in actions from making any external connections.
++
+Note that hosts associated with built-in actions, such as Slack and PagerDuty, are not automatically added to allowed hosts. If you are not using the default `[*]` setting, you must ensure that the corresponding endpoints are added to the allowed hosts as well.
+
+`xpack.actions.customHostSettings` {ess-icon}::
+A list of custom host settings to override existing global settings.
+Default: an empty list.
++
+Each entry in the list must have a `url` property, to associate a connection
+type (mail or https), hostname and port with the remaining options in the
+entry.
++
+In the following example, two custom host settings
+are defined. The first provides a custom host setting for mail server
+`mail.example.com` using port 465 that supplies server certificate authorization
+data from both a file and inline, and requires TLS for the
+connection. The second provides a custom host setting for https server
+`webhook.example.com` which turns off server certificate authorization.
++
[source,yaml]
--
xpack.actions.customHostSettings:
@@ -86,132 +75,106 @@ xpack.actions.customHostSettings:
verificationMode: 'none'
--
-[cols="2*<"]
-|===
-
-| `xpack.actions.customHostSettings[n]`
-`.url` {ess-icon}
- | A URL associated with this custom host setting. Should be in the form of
- `protocol://hostname:port`, where `protocol` is `https` or `smtp`. If the
- port is not provided, 443 is used for `https` and 25 is used for
- `smtp`. The `smtp` URLs are used for the Email actions that use this
- server, and the `https` URLs are used for actions which use `https` to
- connect to services. +
- +
- Entries with `https` URLs can use the `ssl` options, and entries with `smtp`
- URLs can use both the `ssl` and `smtp` options. +
- +
- No other URL values should be part of this URL, including paths,
- query strings, and authentication information. When an http or smtp request
- is made as part of executing an action, only the protocol, hostname, and
- port of the URL for that request are used to look up these configuration
- values.
-
-| `xpack.actions.customHostSettings[n]`
-`.smtp.ignoreTLS` {ess-icon}
- | A boolean value indicating that TLS must not be used for this connection.
- The options `smtp.ignoreTLS` and `smtp.requireTLS` can not both be set to true.
-
-| `xpack.actions.customHostSettings[n]`
-`.smtp.requireTLS` {ess-icon}
- | A boolean value indicating that TLS must be used for this connection.
- The options `smtp.ignoreTLS` and `smtp.requireTLS` can not both be set to true.
-
-| `xpack.actions.customHostSettings[n]`
-`.ssl.rejectUnauthorized`
- | Deprecated. Use <> instead. A boolean value indicating whether to bypass server certificate validation.
- Overrides the general `xpack.actions.rejectUnauthorized` configuration
- for requests made for this hostname/port.
-
-|[[action-config-custom-host-verification-mode]] `xpack.actions.customHostSettings[n]`
-`.ssl.verificationMode` {ess-icon}
- | Controls the verification of the server certificate that {hosted-ems} receives when making an outbound SSL/TLS connection to the host server. Valid values are `full`, `certificate`, and `none`.
- Use `full` to perform hostname verification, `certificate` to skip hostname verification, and `none` to skip verification. Default: `full`. <>. Overrides the general `xpack.actions.ssl.verificationMode` configuration
- for requests made for this hostname/port.
-
-| `xpack.actions.customHostSettings[n]`
-`.ssl.certificateAuthoritiesFiles`
- | A file name or list of file names of PEM-encoded certificate files to use
- to validate the server.
-
-| `xpack.actions.customHostSettings[n]`
-`.ssl.certificateAuthoritiesData` {ess-icon}
- | The contents of a PEM-encoded certificate file, or multiple files appended
- into a single string. This configuration can be used for environments where
- the files cannot be made available.
-
-| `xpack.actions.enabledActionTypes` {ess-icon}
- | A list of action types that are enabled. It defaults to `[*]`, enabling all types. The names for built-in {kib} action types are prefixed with a `.` and include: `.server-log`, `.slack`, `.email`, `.index`, `.pagerduty`, and `.webhook`. An empty list `[]` will disable all action types. +
- +
- Disabled action types will not appear as an option when creating new connectors, but existing connectors and actions of that type will remain in {kib} and will not function.
-
-| `xpack.actions`
-`.preconfiguredAlertHistoryEsIndex` {ess-icon}
- | Enables a preconfigured alert history {es} <> connector. Default: `false`.
-
-| `xpack.actions.preconfigured`
- | Specifies preconfigured connector IDs and configs. Default: {}.
-
-| `xpack.actions.proxyUrl` {ess-icon}
- | Specifies the proxy URL to use, if using a proxy for actions. By default, no proxy is used.
-
-| `xpack.actions.proxyBypassHosts` {ess-icon}
- | Specifies hostnames which should not use the proxy, if using a proxy for actions. The value is an array of hostnames as strings. By default, all hosts will use the proxy, but if an action's hostname is in this list, the proxy will not be used. The settings `xpack.actions.proxyBypassHosts` and `xpack.actions.proxyOnlyHosts` cannot be used at the same time.
-
-| `xpack.actions.proxyOnlyHosts` {ess-icon}
- | Specifies hostnames which should only use the proxy, if using a proxy for actions. The value is an array of hostnames as strings. By default, no hosts will use the proxy, but if an action's hostname is in this list, the proxy will be used. The settings `xpack.actions.proxyBypassHosts` and `xpack.actions.proxyOnlyHosts` cannot be used at the same time.
-
-| `xpack.actions.proxyHeaders` {ess-icon}
- | Specifies HTTP headers for the proxy, if using a proxy for actions. Default: {}.
-
-a|`xpack.actions.`
-`proxyRejectUnauthorizedCertificates` {ess-icon}
- | Deprecated. Use <> instead. Set to `false` to bypass certificate validation for the proxy, if using a proxy for actions. Default: `true`.
-
-|[[action-config-proxy-verification-mode]]
-`xpack.actions[n]`
-`.ssl.proxyVerificationMode` {ess-icon}
-| Controls the verification for the proxy server certificate that {hosted-ems} receives when making an outbound SSL/TLS connection to the proxy server. Valid values are `full`, `certificate`, and `none`.
+`xpack.actions.customHostSettings[n].url` {ess-icon}::
+A URL associated with this custom host setting. Should be in the form of
+`protocol://hostname:port`, where `protocol` is `https` or `smtp`. If the
+port is not provided, 443 is used for `https` and 25 is used for
+`smtp`. The `smtp` URLs are used for the Email actions that use this
+server, and the `https` URLs are used for actions which use `https` to
+connect to services.
++
+Entries with `https` URLs can use the `ssl` options, and entries with `smtp`
+URLs can use both the `ssl` and `smtp` options.
++
+No other URL values should be part of this URL, including paths,
+query strings, and authentication information. When an http or smtp request
+is made as part of executing an action, only the protocol, hostname, and
+port of the URL for that request are used to look up these configuration
+values.
+
+`xpack.actions.customHostSettings[n].smtp.ignoreTLS` {ess-icon}::
+A boolean value indicating that TLS must not be used for this connection.
+The options `smtp.ignoreTLS` and `smtp.requireTLS` can not both be set to true.
+
+`xpack.actions.customHostSettings[n].smtp.requireTLS` {ess-icon}::
+A boolean value indicating that TLS must be used for this connection.
+The options `smtp.ignoreTLS` and `smtp.requireTLS` can not both be set to true.
+
+`xpack.actions.customHostSettings[n].ssl.rejectUnauthorized`::
+Deprecated. Use <> instead. A boolean value indicating whether to bypass server certificate validation.
+Overrides the general `xpack.actions.rejectUnauthorized` configuration
+for requests made for this hostname/port.
+
+[[action-config-custom-host-verification-mode]] `xpack.actions.customHostSettings[n].ssl.verificationMode` {ess-icon}::
+Controls the verification of the server certificate that {hosted-ems} receives when making an outbound SSL/TLS connection to the host server. Valid values are `full`, `certificate`, and `none`.
+Use `full` to perform hostname verification, `certificate` to skip hostname verification, and `none` to skip verification. Default: `full`. <>. Overrides the general `xpack.actions.ssl.verificationMode` configuration
+for requests made for this hostname/port.
+
+`xpack.actions.customHostSettings[n].ssl.certificateAuthoritiesFiles`::
+A file name or list of file names of PEM-encoded certificate files to use
+to validate the server.
+
+`xpack.actions.customHostSettings[n].ssl.certificateAuthoritiesData` {ess-icon}::
+The contents of a PEM-encoded certificate file, or multiple files appended
+into a single string. This configuration can be used for environments where
+the files cannot be made available.
+
+`xpack.actions.enabledActionTypes` {ess-icon}::
+A list of action types that are enabled. It defaults to `[*]`, enabling all types. The names for built-in {kib} action types are prefixed with a `.` and include: `.server-log`, `.slack`, `.email`, `.index`, `.pagerduty`, and `.webhook`. An empty list `[]` will disable all action types.
++
+Disabled action types will not appear as an option when creating new connectors, but existing connectors and actions of that type will remain in {kib} and will not function.
+
+`xpack.actions.preconfiguredAlertHistoryEsIndex` {ess-icon}::
+Enables a preconfigured alert history {es} <> connector. Default: `false`.
+
+`xpack.actions.preconfigured`::
+Specifies preconfigured connector IDs and configs. Default: {}.
+
+`xpack.actions.proxyUrl` {ess-icon}::
+Specifies the proxy URL to use, if using a proxy for actions. By default, no proxy is used.
+
+`xpack.actions.proxyBypassHosts` {ess-icon}::
+Specifies hostnames which should not use the proxy, if using a proxy for actions. The value is an array of hostnames as strings. By default, all hosts will use the proxy, but if an action's hostname is in this list, the proxy will not be used. The settings `xpack.actions.proxyBypassHosts` and `xpack.actions.proxyOnlyHosts` cannot be used at the same time.
+
+`xpack.actions.proxyOnlyHosts` {ess-icon}::
+Specifies hostnames which should only use the proxy, if using a proxy for actions. The value is an array of hostnames as strings. By default, no hosts will use the proxy, but if an action's hostname is in this list, the proxy will be used. The settings `xpack.actions.proxyBypassHosts` and `xpack.actions.proxyOnlyHosts` cannot be used at the same time.
+
+`xpack.actions.proxyHeaders` {ess-icon}::
+Specifies HTTP headers for the proxy, if using a proxy for actions. Default: {}.
+
+`xpack.actions.proxyRejectUnauthorizedCertificates` {ess-icon}::
+Deprecated. Use <> instead. Set to `false` to bypass certificate validation for the proxy, if using a proxy for actions. Default: `true`.
+
+[[action-config-proxy-verification-mode]] `xpack.actions[n].ssl.proxyVerificationMode` {ess-icon}::
+Controls the verification for the proxy server certificate that {hosted-ems} receives when making an outbound SSL/TLS connection to the proxy server. Valid values are `full`, `certificate`, and `none`.
Use `full` to perform hostname verification, `certificate` to skip hostname verification, and `none` to skip verification. Default: `full`. <>.
-| `xpack.actions.rejectUnauthorized` {ess-icon}
- | Deprecated. Use <> instead. Set to `false` to bypass certificate validation for actions. Default: `true`. +
- +
- As an alternative to setting `xpack.actions.rejectUnauthorized`, you can use the setting
- `xpack.actions.customHostSettings` to set SSL options for specific servers.
-
-|[[action-config-verification-mode]]
-`xpack.actions[n]`
-`.ssl.verificationMode` {ess-icon}
-| Controls the verification for the server certificate that {hosted-ems} receives when making an outbound SSL/TLS connection for actions. Valid values are `full`, `certificate`, and `none`.
- Use `full` to perform hostname verification, `certificate` to skip hostname verification, and `none` to skip verification. Default: `full`. <>. +
- +
- As an alternative to setting `xpack.actions.ssl.verificationMode`, you can use the setting
- `xpack.actions.customHostSettings` to set SSL options for specific servers.
-
+`xpack.actions.rejectUnauthorized` {ess-icon}::
+Deprecated. Use <> instead. Set to `false` to bypass certificate validation for actions. Default: `true`.
++
+As an alternative to setting `xpack.actions.rejectUnauthorized`, you can use the setting
+`xpack.actions.customHostSettings` to set SSL options for specific servers.
+
+[[action-config-verification-mode]] `xpack.actions[n].ssl.verificationMode` {ess-icon}::
+Controls the verification for the server certificate that {hosted-ems} receives when making an outbound SSL/TLS connection for actions. Valid values are `full`, `certificate`, and `none`.
+Use `full` to perform hostname verification, `certificate` to skip hostname verification, and `none` to skip verification. Default: `full`. <>.
++
+As an alternative to setting `xpack.actions.ssl.verificationMode`, you can use the setting
+`xpack.actions.customHostSettings` to set SSL options for specific servers.
-| `xpack.actions.maxResponseContentLength` {ess-icon}
- | Specifies the max number of bytes of the http response for requests to external resources. Default: 1000000 (1MB).
-
-| `xpack.actions.responseTimeout` {ess-icon}
- | Specifies the time allowed for requests to external resources. Requests that take longer are aborted. The time is formatted as: +
- +
- `[ms,s,m,h,d,w,M,Y]` +
- +
- For example, `20m`, `24h`, `7d`, `1w`. Default: `60s`.
-
+
+`xpack.actions.maxResponseContentLength` {ess-icon}::
+Specifies the max number of bytes of the http response for requests to external resources. Default: 1000000 (1MB).
-|===
+
+`xpack.actions.responseTimeout` {ess-icon}::
+Specifies the time allowed for requests to external resources. Requests that take longer are aborted. The time is formatted as:
++
+`[ms,s,m,h,d,w,M,Y]`
++
+For example, `20m`, `24h`, `7d`, `1w`. Default: `60s`.
[float]
[[alert-settings]]
==== Alerting settings
-[cols="2*<"]
-|===
-
-| `xpack.alerting.maxEphemeralActionsPerAlert`
- | Sets the number of actions that will be executed ephemerally. To use this, enable ephemeral tasks in task manager first with <>
-
-|===
+`xpack.alerting.maxEphemeralActionsPerAlert`::
+Sets the number of actions that will be executed ephemerally. To use this, enable ephemeral tasks in task manager first with <>.
diff --git a/docs/settings/banners-settings.asciidoc b/docs/settings/banners-settings.asciidoc
index ce56d4dbe7a4d..43f1724403595 100644
--- a/docs/settings/banners-settings.asciidoc
+++ b/docs/settings/banners-settings.asciidoc
@@ -14,25 +14,17 @@ You can configure the `xpack.banners` settings in your `kibana.yml` file.
Banners are a https://www.elastic.co/subscriptions[subscription feature].
====
-[[general-banners-settings-kb]]
-==== General banner settings
+`xpack.banners.placement`::
+Set to `top` to display a banner above the Elastic header. Defaults to `disabled`.
-[cols="2*<"]
-|===
+`xpack.banners.textContent`::
+The text to display inside the banner, either plain text or Markdown.
-| `xpack.banners.placement`
-| Set to `top` to display a banner above the Elastic header. Defaults to `disabled`.
+`xpack.banners.textColor`::
+The color for the banner text. Defaults to `#8A6A0A`.
-| `xpack.banners.textContent`
-| The text to display inside the banner, either plain text or Markdown.
+`xpack.banners.backgroundColor`::
+The color of the banner background. Defaults to `#FFF9E8`.
-| `xpack.banners.textColor`
-| The color for the banner text. Defaults to `#8A6A0A`.
-
-| `xpack.banners.backgroundColor`
-| The color of the banner background. Defaults to `#FFF9E8`.
-
-| `xpack.banners.disableSpaceBanners`
-| If true, per-space banner overrides will be disabled. Defaults to `false`.
-
-|===
+`xpack.banners.disableSpaceBanners`::
+If true, per-space banner overrides will be disabled. Defaults to `false`.
diff --git a/docs/settings/dev-settings.asciidoc b/docs/settings/dev-settings.asciidoc
index 810694f46b317..b7edf36851d91 100644
--- a/docs/settings/dev-settings.asciidoc
+++ b/docs/settings/dev-settings.asciidoc
@@ -12,31 +12,20 @@ They are enabled by default.
[[grok-settings]]
==== Grok Debugger settings
-[cols="2*<"]
-|===
-| `xpack.grokdebugger.enabled` {ess-icon}
- | Set to `true` to enable the <>. Defaults to `true`.
+`xpack.grokdebugger.enabled` {ess-icon}::
+Set to `true` to enable the <>. Defaults to `true`.
-|===
[float]
[[profiler-settings]]
==== {searchprofiler} settings
-[cols="2*<"]
-|===
-| `xpack.searchprofiler.enabled`
- | Set to `true` to enable the <>. Defaults to `true`.
-
-|===
+`xpack.searchprofiler.enabled`::
+Set to `true` to enable the <>. Defaults to `true`.
[float]
[[painless_lab-settings]]
==== Painless Lab settings
-[cols="2*<"]
-|===
-| `xpack.painless_lab.enabled`
- | When set to `true`, enables the <>. Defaults to `true`.
-
-|===
+`xpack.painless_lab.enabled`::
+When set to `true`, enables the <>. Defaults to `true`.
diff --git a/docs/settings/graph-settings.asciidoc b/docs/settings/graph-settings.asciidoc
index 876e3dc936ccf..093edb0d08547 100644
--- a/docs/settings/graph-settings.asciidoc
+++ b/docs/settings/graph-settings.asciidoc
@@ -7,13 +7,5 @@
You do not need to configure any settings to use the {graph-features}.
-[float]
-[[general-graph-settings]]
-==== General graph settings
-
-[cols="2*<"]
-|===
-| `xpack.graph.enabled` {ess-icon}
- | Set to `false` to disable the {graph-features}.
-
-|===
+`xpack.graph.enabled` {ess-icon}::
+Set to `false` to disable the {graph-features}.
diff --git a/docs/settings/monitoring-settings.asciidoc b/docs/settings/monitoring-settings.asciidoc
index 6483442248cea..31148f0abf4e1 100644
--- a/docs/settings/monitoring-settings.asciidoc
+++ b/docs/settings/monitoring-settings.asciidoc
@@ -37,6 +37,10 @@ For more information, see
monitoring back-end does not run and {kib} stats are not sent to the monitoring
cluster.
+| `monitoring.ui.ccs.enabled`
+ | Set to `true` (default) to enable {ref}/modules-cross-cluster-search.html[cross-cluster search] of your monitoring data. The {ref}/modules-remote-clusters.html#remote-cluster-settings[`remote_cluster_client`] role must exist on each node.
+
+
| `monitoring.ui.elasticsearch.hosts`
| Specifies the location of the {es} cluster where your monitoring data is stored.
By default, this is the same as <>. This setting enables
diff --git a/docs/user/monitoring/kibana-alerts.asciidoc b/docs/user/monitoring/kibana-alerts.asciidoc
index f00a3999ab277..64ba8bf044e4f 100644
--- a/docs/user/monitoring/kibana-alerts.asciidoc
+++ b/docs/user/monitoring/kibana-alerts.asciidoc
@@ -124,7 +124,7 @@ valid for 30 days.
== Alerts and rules
[discrete]
=== Create default rules
-This option can be used to create default rules in this kibana spaces. This is
+This option can be used to create default rules in this {kib} space. This is
useful for scenarios when you didn't choose to create these default rules initially
or anytime later if the rules were accidentally deleted.
diff --git a/examples/search_examples/public/search/app.tsx b/examples/search_examples/public/search/app.tsx
index 06f9426b4965c..bfb41160ae963 100644
--- a/examples/search_examples/public/search/app.tsx
+++ b/examples/search_examples/public/search/app.tsx
@@ -131,12 +131,46 @@ export const SearchExamplesApp = ({
setSelectedNumericField(fields?.length ? getNumeric(fields)[0] : null);
}, [fields]);
- const doAsyncSearch = async (strategy?: string, sessionId?: string) => {
+ const doAsyncSearch = async (
+ strategy?: string,
+ sessionId?: string,
+ addWarning: boolean = false,
+ addError: boolean = false
+ ) => {
if (!indexPattern || !selectedNumericField) return;
// Construct the query portion of the search request
const query = data.query.getEsQuery(indexPattern);
+ if (addWarning) {
+ query.bool.must.push({
+ // @ts-ignore
+ error_query: {
+ indices: [
+ {
+ name: indexPattern.title,
+ error_type: 'warning',
+ message: 'Watch out!',
+ },
+ ],
+ },
+ });
+ }
+ if (addError) {
+ query.bool.must.push({
+ // @ts-ignore
+ error_query: {
+ indices: [
+ {
+ name: indexPattern.title,
+ error_type: 'exception',
+ message: 'Watch out!',
+ },
+ ],
+ },
+ });
+ }
+
// Construct the aggregations portion of the search request by using the `data.search.aggs` service.
const aggs = [{ type: 'avg', params: { field: selectedNumericField!.name } }];
const aggsDsl = data.search.aggs.createAggConfigs(indexPattern, aggs).toDsl();
@@ -193,14 +227,23 @@ export const SearchExamplesApp = ({
}
);
searchSubscription$.unsubscribe();
+ if (res.warning) {
+ notifications.toasts.addWarning({
+ title: 'Warning',
+ text: mountReactNode(res.warning),
+ });
+ }
} else if (isErrorResponse(res)) {
// TODO: Make response error status clearer
- notifications.toasts.addWarning('An error has occurred');
+ notifications.toasts.addDanger('An error has occurred');
searchSubscription$.unsubscribe();
}
},
- error: () => {
- notifications.toasts.addDanger('Failed to run search');
+ error: (e) => {
+ notifications.toasts.addDanger({
+ title: 'Failed to run search',
+ text: e.message,
+ });
},
});
};
@@ -270,6 +313,14 @@ export const SearchExamplesApp = ({
doAsyncSearch('myStrategy');
};
+ const onWarningSearchClickHandler = () => {
+ doAsyncSearch(undefined, undefined, true);
+ };
+
+ const onErrorSearchClickHandler = () => {
+ doAsyncSearch(undefined, undefined, false, true);
+ };
+
const onPartialResultsClickHandler = () => {
setSelectedTab(1);
const req = {
@@ -299,8 +350,11 @@ export const SearchExamplesApp = ({
searchSubscription$.unsubscribe();
}
},
- error: () => {
- notifications.toasts.addDanger('Failed to run search');
+ error: (e) => {
+ notifications.toasts.addDanger({
+ title: 'Failed to run search',
+ text: e.message,
+ });
},
});
};
@@ -530,6 +584,38 @@ export const SearchExamplesApp = ({
+        <EuiSpacer />
+        <EuiTitle size="s">
+          <h3>Handling errors & warnings</h3>
+        </EuiTitle>
+        <EuiText>
+          When fetching data from Elasticsearch, there are several different ways warnings and
+          errors may be returned. In general, it is recommended to surface these in the UX.
+        </EuiText>
+        <EuiSpacer />
+        {/* Markup reconstructed: the original JSX was lost in extraction. The buttons
+            exercise the warning and error paths wired up above. */}
+        <EuiButtonEmpty size="xs" onClick={onWarningSearchClickHandler}>
+          Request with warning
+        </EuiButtonEmpty>
+        <EuiButtonEmpty size="xs" onClick={onErrorSearchClickHandler}>
+          Request with error
+        </EuiButtonEmpty>
Handling partial results
diff --git a/jest.config.js b/jest.config.js
index bd1e865a7e64a..09532dc28bbb2 100644
--- a/jest.config.js
+++ b/jest.config.js
@@ -13,6 +13,8 @@ module.exports = {
'/packages/*/jest.config.js',
'/src/*/jest.config.js',
'/src/plugins/*/jest.config.js',
+ '/src/plugins/chart_expressions/*/jest.config.js',
+ '/src/plugins/vis_types/*/jest.config.js',
'/test/*/jest.config.js',
'/x-pack/plugins/*/jest.config.js',
],
diff --git a/package.json b/package.json
index 7d6698a77a302..cadd067299554 100644
--- a/package.json
+++ b/package.json
@@ -92,9 +92,9 @@
"dependencies": {
"@elastic/apm-rum": "^5.8.0",
"@elastic/apm-rum-react": "^1.2.11",
- "@elastic/charts": "34.0.0",
+ "@elastic/charts": "34.1.1",
"@elastic/datemath": "link:bazel-bin/packages/elastic-datemath",
- "@elastic/elasticsearch": "npm:@elastic/elasticsearch-canary@^7.15.0-canary.3",
+ "@elastic/elasticsearch": "npm:@elastic/elasticsearch-canary@^7.16.0-canary.1",
"@elastic/ems-client": "7.15.0",
"@elastic/eui": "37.1.1",
"@elastic/filesaver": "1.1.2",
diff --git a/packages/kbn-alerts/.babelrc b/packages/kbn-alerts/.babelrc
new file mode 100644
index 0000000000000..40a198521b903
--- /dev/null
+++ b/packages/kbn-alerts/.babelrc
@@ -0,0 +1,4 @@
+{
+ "presets": ["@kbn/babel-preset/node_preset"],
+ "ignore": ["**/*.test.ts", "**/*.test.tsx"]
+}
diff --git a/packages/kbn-alerts/.babelrc.browser b/packages/kbn-alerts/.babelrc.browser
new file mode 100644
index 0000000000000..71bbfbcd6eb2f
--- /dev/null
+++ b/packages/kbn-alerts/.babelrc.browser
@@ -0,0 +1,4 @@
+{
+ "presets": ["@kbn/babel-preset/webpack_preset"],
+ "ignore": ["**/*.test.ts", "**/*.test.tsx"]
+}
diff --git a/packages/kbn-alerts/BUILD.bazel b/packages/kbn-alerts/BUILD.bazel
index c585b4430bfcb..a571380202cd6 100644
--- a/packages/kbn-alerts/BUILD.bazel
+++ b/packages/kbn-alerts/BUILD.bazel
@@ -1,5 +1,6 @@
load("@npm//@bazel/typescript:index.bzl", "ts_config", "ts_project")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library", "pkg_npm")
+load("//src/dev/bazel:index.bzl", "jsts_transpiler")
PKG_BASE_NAME = "kbn-alerts"
@@ -12,8 +13,7 @@ SOURCE_FILES = glob(
],
exclude = [
"**/*.test.*",
- "**/*.mock.*",
- "**/*.mocks.*",
+ "**/__snapshots__"
],
)
@@ -25,32 +25,40 @@ filegroup(
)
NPM_MODULE_EXTRA_FILES = [
- "react/package.json",
"package.json",
"README.md",
]
-SRC_DEPS = [
- "//packages/kbn-babel-preset",
- "//packages/kbn-dev-utils",
+RUNTIME_DEPS = [
"//packages/kbn-i18n",
- "@npm//@babel/core",
- "@npm//babel-loader",
"@npm//@elastic/eui",
+ "@npm//enzyme",
"@npm//react",
"@npm//resize-observer-polyfill",
- "@npm//rxjs",
- "@npm//tslib",
]
TYPES_DEPS = [
- "@npm//typescript",
+ "//packages/kbn-i18n",
+ "@npm//@elastic/eui",
+ "@npm//resize-observer-polyfill",
+ "@npm//@types/enzyme",
"@npm//@types/jest",
"@npm//@types/node",
"@npm//@types/react",
]
-DEPS = SRC_DEPS + TYPES_DEPS
+jsts_transpiler(
+ name = "target_node",
+ srcs = SRCS,
+ build_pkg_name = package_name(),
+)
+
+jsts_transpiler(
+ name = "target_web",
+ srcs = SRCS,
+ build_pkg_name = package_name(),
+ config_file = ".babelrc.browser"
+)
ts_config(
name = "tsconfig",
@@ -61,50 +69,26 @@ ts_config(
],
)
-ts_config(
- name = "tsconfig_browser",
- src = "tsconfig.browser.json",
- deps = [
- "//:tsconfig.base.json",
- "//:tsconfig.browser.json",
- "//:tsconfig.browser_bazel.json",
- ],
-)
-
ts_project(
- name = "tsc",
+ name = "tsc_types",
args = ["--pretty"],
srcs = SRCS,
- deps = DEPS,
- allow_js = True,
+ deps = TYPES_DEPS,
declaration = True,
- declaration_dir = "target_types",
declaration_map = True,
- out_dir = "target_node",
+ emit_declaration_only = True,
+ out_dir = "target_types",
root_dir = "src",
source_map = True,
tsconfig = ":tsconfig",
)
-ts_project(
- name = "tsc_browser",
- args = ['--pretty'],
- srcs = SRCS,
- deps = DEPS,
- allow_js = True,
- declaration = False,
- out_dir = "target_web",
- source_map = True,
- root_dir = "src",
- tsconfig = ":tsconfig_browser",
-)
-
js_library(
name = PKG_BASE_NAME,
- package_name = PKG_REQUIRE_NAME,
srcs = NPM_MODULE_EXTRA_FILES,
+ deps = RUNTIME_DEPS + [":target_node", ":target_web", ":tsc_types"],
+ package_name = PKG_REQUIRE_NAME,
visibility = ["//visibility:public"],
- deps = [":tsc", ":tsc_browser"] + DEPS,
)
pkg_npm(
@@ -120,4 +104,4 @@ filegroup(
":npm_module",
],
visibility = ["//visibility:public"],
-)
\ No newline at end of file
+)
diff --git a/packages/kbn-alerts/react/package.json b/packages/kbn-alerts/react/package.json
deleted file mode 100644
index c5f222b5843ac..0000000000000
--- a/packages/kbn-alerts/react/package.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "browser": "../target_web/react",
- "main": "../target_node/react",
- "types": "../target_types/react/index.d.ts"
-}
diff --git a/packages/kbn-alerts/tsconfig.browser.json b/packages/kbn-alerts/tsconfig.browser.json
deleted file mode 100644
index bb58f529eb0bb..0000000000000
--- a/packages/kbn-alerts/tsconfig.browser.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "extends": "../../tsconfig.browser_bazel.json",
- "compilerOptions": {
- "allowJs": true,
- "outDir": "./target_web",
- "declaration": false,
- "isolatedModules": true,
- "sourceMap": true,
- "sourceRoot": "../../../../../packages/kbn-alerts/src",
- "types": [
- "jest",
- "node"
- ],
- },
- "include": [
- "src/**/*.ts",
- "src/**/*.tsx",
- ],
- "exclude": [
- "**/__fixtures__/**/*"
- ]
-}
\ No newline at end of file
diff --git a/packages/kbn-alerts/tsconfig.json b/packages/kbn-alerts/tsconfig.json
index 6a791ca2e5844..fa18a40744354 100644
--- a/packages/kbn-alerts/tsconfig.json
+++ b/packages/kbn-alerts/tsconfig.json
@@ -1,15 +1,14 @@
{
"extends": "../../tsconfig.bazel.json",
"compilerOptions": {
- "allowJs": true,
- "declarationDir": "./target_types",
- "outDir": "target_node",
"declaration": true,
"declarationMap": true,
+ "emitDeclarationOnly": true,
+ "outDir": "target_types",
+ "rootDir": "src",
"sourceMap": true,
"sourceRoot": "../../../../packages/kbn-alerts/src",
- "rootDir": "src",
"types": ["jest", "node", "resize-observer-polyfill"]
},
- "include": ["src/**/*"]
-}
\ No newline at end of file
+ "include": ["src/**/*"],
+}
diff --git a/packages/kbn-config/src/config_service.test.ts b/packages/kbn-config/src/config_service.test.ts
index d09c61a1c2110..aa520e7189e54 100644
--- a/packages/kbn-config/src/config_service.test.ts
+++ b/packages/kbn-config/src/config_service.test.ts
@@ -13,7 +13,7 @@ import { mockApplyDeprecations, mockedChangedPaths } from './config_service.test
import { rawConfigServiceMock } from './raw/raw_config_service.mock';
import { schema } from '@kbn/config-schema';
-import { MockedLogger, loggerMock } from '@kbn/logging/target/mocks';
+import { MockedLogger, loggerMock } from '@kbn/logging/mocks';
import { ConfigService, Env, RawPackageInfo } from '.';
diff --git a/packages/kbn-interpreter/.babelrc b/packages/kbn-interpreter/.babelrc
new file mode 100644
index 0000000000000..7da72d1779128
--- /dev/null
+++ b/packages/kbn-interpreter/.babelrc
@@ -0,0 +1,3 @@
+{
+ "presets": ["@kbn/babel-preset/node_preset"]
+}
diff --git a/packages/kbn-interpreter/BUILD.bazel b/packages/kbn-interpreter/BUILD.bazel
index 903f892b64f3f..52df0f0aa8d85 100644
--- a/packages/kbn-interpreter/BUILD.bazel
+++ b/packages/kbn-interpreter/BUILD.bazel
@@ -1,6 +1,7 @@
load("@npm//@bazel/typescript:index.bzl", "ts_config", "ts_project")
load("@npm//peggy:index.bzl", "peggy")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library", "pkg_npm")
+load("//src/dev/bazel:index.bzl", "jsts_transpiler")
PKG_BASE_NAME = "kbn-interpreter"
PKG_REQUIRE_NAME = "@kbn/interpreter"
@@ -25,7 +26,7 @@ NPM_MODULE_EXTRA_FILES = [
"package.json",
]
-SRC_DEPS = [
+RUNTIME_DEPS = [
"@npm//lodash",
]
@@ -35,7 +36,11 @@ TYPES_DEPS = [
"@npm//@types/node",
]
-DEPS = SRC_DEPS + TYPES_DEPS
+jsts_transpiler(
+ name = "target_node",
+ srcs = SRCS,
+ build_pkg_name = package_name(),
+)
peggy(
name = "grammar",
@@ -62,14 +67,15 @@ ts_config(
)
ts_project(
- name = "tsc",
+ name = "tsc_types",
args = ['--pretty'],
srcs = SRCS,
- deps = DEPS,
+ deps = TYPES_DEPS,
allow_js = True,
declaration = True,
declaration_map = True,
- out_dir = "target",
+ emit_declaration_only = True,
+ out_dir = "target_types",
source_map = True,
root_dir = "src",
tsconfig = ":tsconfig",
@@ -78,7 +84,7 @@ ts_project(
js_library(
name = PKG_BASE_NAME,
srcs = NPM_MODULE_EXTRA_FILES + [":grammar"],
- deps = DEPS + [":tsc"],
+ deps = RUNTIME_DEPS + [":target_node", ":tsc_types"],
package_name = PKG_REQUIRE_NAME,
visibility = ["//visibility:public"],
)
diff --git a/packages/kbn-interpreter/common/package.json b/packages/kbn-interpreter/common/package.json
index 2f5277a8e8652..6d03f2e1c6236 100644
--- a/packages/kbn-interpreter/common/package.json
+++ b/packages/kbn-interpreter/common/package.json
@@ -1,5 +1,5 @@
{
"private": true,
- "main": "../target/common/index.js",
- "types": "../target/common/index.d.ts"
+ "main": "../target_node/common/index.js",
+ "types": "../target_types/common/index.d.ts"
}
\ No newline at end of file
diff --git a/packages/kbn-interpreter/src/common/lib/ast.from_expression.test.js b/packages/kbn-interpreter/src/common/lib/ast.from_expression.test.js
index 608fe63b0b825..ae35a14482324 100644
--- a/packages/kbn-interpreter/src/common/lib/ast.from_expression.test.js
+++ b/packages/kbn-interpreter/src/common/lib/ast.from_expression.test.js
@@ -6,7 +6,7 @@
* Side Public License, v 1.
*/
-import { fromExpression } from '@kbn/interpreter/target/common/lib/ast';
+import { fromExpression } from '@kbn/interpreter/common';
import { getType } from './get_type';
describe('ast fromExpression', () => {
diff --git a/packages/kbn-interpreter/tsconfig.json b/packages/kbn-interpreter/tsconfig.json
index 74ec484ea63e9..60f8c76cf8809 100644
--- a/packages/kbn-interpreter/tsconfig.json
+++ b/packages/kbn-interpreter/tsconfig.json
@@ -2,9 +2,10 @@
"extends": "../../tsconfig.bazel.json",
"compilerOptions": {
"allowJs": true,
- "outDir": "./target/types",
"declaration": true,
"declarationMap": true,
+ "emitDeclarationOnly": true,
+ "outDir": "./target_types",
"rootDir": "src",
"sourceMap": true,
"sourceRoot": "../../../../packages/kbn-interpreter/src",
diff --git a/packages/kbn-logging/.babelrc b/packages/kbn-logging/.babelrc
new file mode 100644
index 0000000000000..7da72d1779128
--- /dev/null
+++ b/packages/kbn-logging/.babelrc
@@ -0,0 +1,3 @@
+{
+ "presets": ["@kbn/babel-preset/node_preset"]
+}
diff --git a/packages/kbn-logging/BUILD.bazel b/packages/kbn-logging/BUILD.bazel
index 1a3fa851a3957..71a7ece15aa73 100644
--- a/packages/kbn-logging/BUILD.bazel
+++ b/packages/kbn-logging/BUILD.bazel
@@ -1,5 +1,6 @@
load("@npm//@bazel/typescript:index.bzl", "ts_config", "ts_project")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library", "pkg_npm")
+load("//src/dev/bazel:index.bzl", "jsts_transpiler")
PKG_BASE_NAME = "kbn-logging"
PKG_REQUIRE_NAME = "@kbn/logging"
@@ -21,20 +22,26 @@ filegroup(
)
NPM_MODULE_EXTRA_FILES = [
+ "mocks/package.json",
"package.json",
"README.md"
]
-SRC_DEPS = [
+RUNTIME_DEPS = [
"//packages/kbn-std"
]
TYPES_DEPS = [
+ "//packages/kbn-std",
"@npm//@types/jest",
"@npm//@types/node",
]
-DEPS = SRC_DEPS + TYPES_DEPS
+jsts_transpiler(
+ name = "target_node",
+ srcs = SRCS,
+ build_pkg_name = package_name(),
+)
ts_config(
name = "tsconfig",
@@ -46,13 +53,14 @@ ts_config(
)
ts_project(
- name = "tsc",
+ name = "tsc_types",
args = ['--pretty'],
srcs = SRCS,
- deps = DEPS,
+ deps = TYPES_DEPS,
declaration = True,
declaration_map = True,
- out_dir = "target",
+ emit_declaration_only = True,
+ out_dir = "target_types",
source_map = True,
root_dir = "src",
tsconfig = ":tsconfig",
@@ -61,7 +69,7 @@ ts_project(
js_library(
name = PKG_BASE_NAME,
srcs = NPM_MODULE_EXTRA_FILES,
- deps = DEPS + [":tsc"],
+ deps = RUNTIME_DEPS + [":target_node", ":tsc_types"],
package_name = PKG_REQUIRE_NAME,
visibility = ["//visibility:public"],
)
diff --git a/packages/kbn-logging/mocks/package.json b/packages/kbn-logging/mocks/package.json
new file mode 100644
index 0000000000000..8410f557e9524
--- /dev/null
+++ b/packages/kbn-logging/mocks/package.json
@@ -0,0 +1,5 @@
+{
+ "private": true,
+ "main": "../target_node/mocks/index.js",
+ "types": "../target_types/mocks/index.d.ts"
+}
\ No newline at end of file
diff --git a/packages/kbn-logging/package.json b/packages/kbn-logging/package.json
index d80cc1c40d7e1..c35c2f5d06095 100644
--- a/packages/kbn-logging/package.json
+++ b/packages/kbn-logging/package.json
@@ -3,6 +3,6 @@
"version": "1.0.0",
"private": true,
"license": "SSPL-1.0 OR Elastic License 2.0",
- "main": "./target/index.js",
- "types": "./target/index.d.ts"
+ "main": "./target_node/index.js",
+ "types": "./target_types/index.d.ts"
}
\ No newline at end of file
diff --git a/packages/kbn-logging/tsconfig.json b/packages/kbn-logging/tsconfig.json
index aaf79da229a86..a6fb0f2f73187 100644
--- a/packages/kbn-logging/tsconfig.json
+++ b/packages/kbn-logging/tsconfig.json
@@ -1,13 +1,14 @@
{
"extends": "../../tsconfig.bazel.json",
"compilerOptions": {
- "outDir": "target",
- "stripInternal": false,
"declaration": true,
"declarationMap": true,
+ "emitDeclarationOnly": true,
+ "outDir": "target_types",
"rootDir": "src",
"sourceMap": true,
"sourceRoot": "../../../../packages/kbn-logging/src",
+ "stripInternal": false,
"types": [
"jest",
"node"
diff --git a/packages/kbn-optimizer/.babelrc b/packages/kbn-optimizer/.babelrc
new file mode 100644
index 0000000000000..1685d1644d94a
--- /dev/null
+++ b/packages/kbn-optimizer/.babelrc
@@ -0,0 +1,4 @@
+{
+ "presets": ["@kbn/babel-preset/node_preset"],
+ "ignore": ["**/*.test.js"]
+}
diff --git a/packages/kbn-optimizer/BUILD.bazel b/packages/kbn-optimizer/BUILD.bazel
index ddf2a05519682..7f04aa4b262b0 100644
--- a/packages/kbn-optimizer/BUILD.bazel
+++ b/packages/kbn-optimizer/BUILD.bazel
@@ -1,5 +1,6 @@
load("@npm//@bazel/typescript:index.bzl", "ts_config", "ts_project")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library", "pkg_npm")
+load("//src/dev/bazel:index.bzl", "jsts_transpiler")
PKG_BASE_NAME = "kbn-optimizer"
PKG_REQUIRE_NAME = "@kbn/optimizer"
@@ -29,7 +30,7 @@ NPM_MODULE_EXTRA_FILES = [
"README.md"
]
-SRC_DEPS = [
+RUNTIME_DEPS = [
"//packages/kbn-config",
"//packages/kbn-dev-utils",
"//packages/kbn-std",
@@ -59,6 +60,22 @@ SRC_DEPS = [
]
TYPES_DEPS = [
+ "//packages/kbn-config",
+ "//packages/kbn-dev-utils",
+ "//packages/kbn-std",
+ "//packages/kbn-ui-shared-deps",
+ "//packages/kbn-utils",
+ "@npm//chalk",
+ "@npm//clean-webpack-plugin",
+ "@npm//cpy",
+ "@npm//del",
+ "@npm//execa",
+ "@npm//jest-diff",
+ "@npm//lmdb-store",
+ "@npm//pirates",
+ "@npm//resize-observer-polyfill",
+ "@npm//rxjs",
+ "@npm//zlib",
"@npm//@types/compression-webpack-plugin",
"@npm//@types/jest",
"@npm//@types/json-stable-stringify",
@@ -72,7 +89,11 @@ TYPES_DEPS = [
"@npm//@types/webpack-sources",
]
-DEPS = SRC_DEPS + TYPES_DEPS
+jsts_transpiler(
+ name = "target_node",
+ srcs = SRCS,
+ build_pkg_name = package_name(),
+)
ts_config(
name = "tsconfig",
@@ -84,13 +105,14 @@ ts_config(
)
ts_project(
- name = "tsc",
+ name = "tsc_types",
args = ['--pretty'],
srcs = SRCS,
- deps = DEPS,
+ deps = TYPES_DEPS,
declaration = True,
declaration_map = True,
- out_dir = "target",
+ emit_declaration_only = True,
+ out_dir = "target_types",
source_map = True,
root_dir = "src",
tsconfig = ":tsconfig",
@@ -99,7 +121,7 @@ ts_project(
js_library(
name = PKG_BASE_NAME,
srcs = NPM_MODULE_EXTRA_FILES,
- deps = DEPS + [":tsc"],
+ deps = RUNTIME_DEPS + [":target_node", ":tsc_types"],
package_name = PKG_REQUIRE_NAME,
visibility = ["//visibility:public"],
)
diff --git a/packages/kbn-optimizer/limits.yml b/packages/kbn-optimizer/limits.yml
index b405fbbe8fafc..4dbb4ad51fa81 100644
--- a/packages/kbn-optimizer/limits.yml
+++ b/packages/kbn-optimizer/limits.yml
@@ -119,4 +119,4 @@ pageLoadAssetSize:
expressionImage: 19288
expressionMetric: 22238
expressionShape: 34008
-
+ expressionTagcloud: 27505
diff --git a/packages/kbn-optimizer/package.json b/packages/kbn-optimizer/package.json
index d23512f7c418d..488e1b5dbfde8 100644
--- a/packages/kbn-optimizer/package.json
+++ b/packages/kbn-optimizer/package.json
@@ -3,6 +3,6 @@
"version": "1.0.0",
"private": true,
"license": "SSPL-1.0 OR Elastic License 2.0",
- "main": "./target/index.js",
- "types": "./target/index.d.ts"
+ "main": "./target_node/index.js",
+ "types": "./target_types/index.d.ts"
}
\ No newline at end of file
diff --git a/packages/kbn-optimizer/src/integration_tests/basic_optimization.test.ts b/packages/kbn-optimizer/src/integration_tests/basic_optimization.test.ts
index 646c279cd1346..c1fa2994bbe52 100644
--- a/packages/kbn-optimizer/src/integration_tests/basic_optimization.test.ts
+++ b/packages/kbn-optimizer/src/integration_tests/basic_optimization.test.ts
@@ -132,7 +132,7 @@ it('builds expected bundles, saves bundle counts to metadata', async () => {
expect(foo.cache.getModuleCount()).toBe(6);
expect(foo.cache.getReferencedFiles()).toMatchInlineSnapshot(`
Array [
- /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/bazel-out/-fastbuild/bin/packages/kbn-ui-shared-deps/target/public_path_module_creator.js,
+ /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/bazel-out/-fastbuild/bin/packages/kbn-ui-shared-deps/target_node/public_path_module_creator.js,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/foo/kibana.json,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/foo/public/async_import.ts,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/foo/public/ext.ts,
@@ -155,7 +155,7 @@ it('builds expected bundles, saves bundle counts to metadata', async () => {
/node_modules/@kbn/optimizer/postcss.config.js,
/node_modules/css-loader/package.json,
/node_modules/style-loader/package.json,
- /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/bazel-out/-fastbuild/bin/packages/kbn-ui-shared-deps/target/public_path_module_creator.js,
+ /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/bazel-out/-fastbuild/bin/packages/kbn-ui-shared-deps/target_node/public_path_module_creator.js,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/bar/kibana.json,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/bar/public/index.scss,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/plugins/bar/public/index.ts,
@@ -175,7 +175,7 @@ it('builds expected bundles, saves bundle counts to metadata', async () => {
expect(baz.cache.getReferencedFiles()).toMatchInlineSnapshot(`
Array [
- /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/bazel-out/-fastbuild/bin/packages/kbn-ui-shared-deps/target/public_path_module_creator.js,
+ /packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/bazel-out/-fastbuild/bin/packages/kbn-ui-shared-deps/target_node/public_path_module_creator.js,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/x-pack/baz/kibana.json,
/packages/kbn-optimizer/src/__fixtures__/__tmp__/mock_repo/x-pack/baz/public/index.ts,
/packages/kbn-optimizer/src/worker/entry_point_creator.ts,
diff --git a/packages/kbn-optimizer/tsconfig.json b/packages/kbn-optimizer/tsconfig.json
index 047c98db8a806..5fbd02106e777 100644
--- a/packages/kbn-optimizer/tsconfig.json
+++ b/packages/kbn-optimizer/tsconfig.json
@@ -1,9 +1,10 @@
{
"extends": "../../tsconfig.bazel.json",
"compilerOptions": {
- "outDir": "./target/types",
"declaration": true,
"declarationMap": true,
+ "emitDeclarationOnly": true,
+ "outDir": "./target_types",
"rootDir": "./src",
"sourceMap": true,
"sourceRoot": "../../../../packages/kbn-optimizer/src",
diff --git a/packages/kbn-plugin-helpers/.babelrc b/packages/kbn-plugin-helpers/.babelrc
new file mode 100644
index 0000000000000..7da72d1779128
--- /dev/null
+++ b/packages/kbn-plugin-helpers/.babelrc
@@ -0,0 +1,3 @@
+{
+ "presets": ["@kbn/babel-preset/node_preset"]
+}
diff --git a/packages/kbn-plugin-helpers/BUILD.bazel b/packages/kbn-plugin-helpers/BUILD.bazel
index 9242701770a86..d7744aecac26e 100644
--- a/packages/kbn-plugin-helpers/BUILD.bazel
+++ b/packages/kbn-plugin-helpers/BUILD.bazel
@@ -1,6 +1,7 @@
load("@npm//@bazel/typescript:index.bzl", "ts_config", "ts_project")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library", "pkg_npm")
+load("//src/dev/bazel:index.bzl", "jsts_transpiler")
PKG_BASE_NAME = "kbn-plugin-helpers"
PKG_REQUIRE_NAME = "@kbn/plugin-helpers"
@@ -26,7 +27,7 @@ NPM_MODULE_EXTRA_FILES = [
"README.md"
]
-SRC_DEPS = [
+RUNTIME_DEPS = [
"//packages/kbn-dev-utils",
"//packages/kbn-optimizer",
"//packages/kbn-utils",
@@ -41,6 +42,13 @@ SRC_DEPS = [
]
TYPES_DEPS = [
+ "//packages/kbn-dev-utils",
+ "//packages/kbn-optimizer",
+ "//packages/kbn-utils",
+ "@npm//del",
+ "@npm//execa",
+ "@npm//globby",
+ "@npm//load-json-file",
"@npm//@types/extract-zip",
"@npm//@types/gulp-zip",
"@npm//@types/inquirer",
@@ -49,7 +57,11 @@ TYPES_DEPS = [
"@npm//@types/vinyl-fs",
]
-DEPS = SRC_DEPS + TYPES_DEPS
+jsts_transpiler(
+ name = "target_node",
+ srcs = SRCS,
+ build_pkg_name = package_name(),
+)
ts_config(
name = "tsconfig",
@@ -61,13 +73,14 @@ ts_config(
)
ts_project(
- name = "tsc",
+ name = "tsc_types",
args = ['--pretty'],
srcs = SRCS,
- deps = DEPS,
+ deps = TYPES_DEPS,
declaration = True,
declaration_map = True,
- out_dir = "target",
+ emit_declaration_only = True,
+ out_dir = "target_types",
source_map = True,
root_dir = "src",
tsconfig = ":tsconfig",
@@ -76,7 +89,7 @@ ts_project(
js_library(
name = PKG_BASE_NAME,
srcs = NPM_MODULE_EXTRA_FILES,
- deps = DEPS + [":tsc"],
+ deps = RUNTIME_DEPS + [":target_node", ":tsc_types"],
package_name = PKG_REQUIRE_NAME,
visibility = ["//visibility:public"],
)
diff --git a/packages/kbn-plugin-helpers/package.json b/packages/kbn-plugin-helpers/package.json
index 1f4df52a03304..21ed8f46f52fa 100644
--- a/packages/kbn-plugin-helpers/package.json
+++ b/packages/kbn-plugin-helpers/package.json
@@ -7,8 +7,8 @@
"kibana": {
"devOnly": true
},
- "main": "target/index.js",
- "types": "target/index.d.ts",
+ "main": "target_node/index.js",
+ "types": "target_types/index.d.ts",
"bin": {
"plugin-helpers": "bin/plugin-helpers.js"
}
diff --git a/packages/kbn-plugin-helpers/tsconfig.json b/packages/kbn-plugin-helpers/tsconfig.json
index 22adf020187ba..34f3ec5e67503 100644
--- a/packages/kbn-plugin-helpers/tsconfig.json
+++ b/packages/kbn-plugin-helpers/tsconfig.json
@@ -1,12 +1,13 @@
{
"extends": "../../tsconfig.bazel.json",
"compilerOptions": {
- "outDir": "target",
- "target": "ES2018",
"declaration": true,
"declarationMap": true,
+ "emitDeclarationOnly": true,
+ "outDir": "target_types",
"sourceMap": true,
"sourceRoot": "../../../../packages/kbn-plugin-helpers/src",
+ "target": "ES2018",
"types": [
"jest",
"node"
diff --git a/packages/kbn-securitysolution-autocomplete/.babelrc b/packages/kbn-securitysolution-autocomplete/.babelrc
new file mode 100644
index 0000000000000..40a198521b903
--- /dev/null
+++ b/packages/kbn-securitysolution-autocomplete/.babelrc
@@ -0,0 +1,4 @@
+{
+ "presets": ["@kbn/babel-preset/node_preset"],
+ "ignore": ["**/*.test.ts", "**/*.test.tsx"]
+}
diff --git a/packages/kbn-securitysolution-autocomplete/.babelrc.browser b/packages/kbn-securitysolution-autocomplete/.babelrc.browser
new file mode 100644
index 0000000000000..71bbfbcd6eb2f
--- /dev/null
+++ b/packages/kbn-securitysolution-autocomplete/.babelrc.browser
@@ -0,0 +1,4 @@
+{
+ "presets": ["@kbn/babel-preset/webpack_preset"],
+ "ignore": ["**/*.test.ts", "**/*.test.tsx"]
+}
diff --git a/packages/kbn-securitysolution-autocomplete/BUILD.bazel b/packages/kbn-securitysolution-autocomplete/BUILD.bazel
index 18c3b8f3ae3bb..53cd7b4f8d3e1 100644
--- a/packages/kbn-securitysolution-autocomplete/BUILD.bazel
+++ b/packages/kbn-securitysolution-autocomplete/BUILD.bazel
@@ -1,5 +1,6 @@
load("@npm//@bazel/typescript:index.bzl", "ts_config", "ts_project")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library", "pkg_npm")
+load("//src/dev/bazel:index.bzl", "jsts_transpiler")
PKG_BASE_NAME = "kbn-securitysolution-autocomplete"
@@ -25,35 +26,54 @@ filegroup(
)
NPM_MODULE_EXTRA_FILES = [
- "react/package.json",
"package.json",
"README.md",
]
-SRC_DEPS = [
- "//packages/kbn-babel-preset",
- "//packages/kbn-dev-utils",
+RUNTIME_DEPS = [
+ "//packages/kbn-es-query",
"//packages/kbn-i18n",
- "//packages/kbn-securitysolution-io-ts-list-types",
"//packages/kbn-securitysolution-list-hooks",
- "//packages/kbn-es-query",
- "@npm//@babel/core",
- "@npm//babel-loader",
+ "//packages/kbn-securitysolution-list-utils",
+ "//packages/kbn-securitysolution-io-ts-list-types",
"@npm//@elastic/eui",
+ "@npm//@testing-library/react",
+ "@npm//@testing-library/react-hooks",
+ "@npm//enzyme",
+ "@npm//moment",
"@npm//react",
"@npm//resize-observer-polyfill",
- "@npm//rxjs",
- "@npm//tslib",
]
TYPES_DEPS = [
- "@npm//typescript",
+ "//packages/kbn-es-query",
+ "//packages/kbn-i18n",
+ "//packages/kbn-securitysolution-list-hooks",
+ "//packages/kbn-securitysolution-list-utils",
+ "//packages/kbn-securitysolution-io-ts-list-types",
+ "@npm//@elastic/eui",
+ "@npm//@testing-library/react",
+ "@npm//@testing-library/react-hooks",
+ "@npm//moment",
+ "@npm//resize-observer-polyfill",
+ "@npm//@types/enzyme",
"@npm//@types/jest",
"@npm//@types/node",
"@npm//@types/react",
]
-DEPS = SRC_DEPS + TYPES_DEPS
+jsts_transpiler(
+ name = "target_node",
+ srcs = SRCS,
+ build_pkg_name = package_name(),
+)
+
+jsts_transpiler(
+ name = "target_web",
+ srcs = SRCS,
+ build_pkg_name = package_name(),
+ config_file = ".babelrc.browser"
+)
ts_config(
name = "tsconfig",
@@ -64,50 +84,26 @@ ts_config(
],
)
-ts_config(
- name = "tsconfig_browser",
- src = "tsconfig.browser.json",
- deps = [
- "//:tsconfig.base.json",
- "//:tsconfig.browser.json",
- "//:tsconfig.browser_bazel.json",
- ],
-)
-
ts_project(
- name = "tsc",
+ name = "tsc_types",
args = ["--pretty"],
srcs = SRCS,
- deps = DEPS,
- allow_js = True,
+ deps = TYPES_DEPS,
declaration = True,
- declaration_dir = "target_types",
declaration_map = True,
- out_dir = "target_node",
+ emit_declaration_only = True,
+ out_dir = "target_types",
root_dir = "src",
source_map = True,
tsconfig = ":tsconfig",
)
-ts_project(
- name = "tsc_browser",
- args = ['--pretty'],
- srcs = SRCS,
- deps = DEPS,
- allow_js = True,
- declaration = False,
- out_dir = "target_web",
- source_map = True,
- root_dir = "src",
- tsconfig = ":tsconfig_browser",
-)
-
js_library(
name = PKG_BASE_NAME,
- package_name = PKG_REQUIRE_NAME,
srcs = NPM_MODULE_EXTRA_FILES,
+ deps = RUNTIME_DEPS + [":target_node", ":target_web", ":tsc_types"],
+ package_name = PKG_REQUIRE_NAME,
visibility = ["//visibility:public"],
- deps = [":tsc", ":tsc_browser"] + DEPS,
)
pkg_npm(
diff --git a/packages/kbn-securitysolution-autocomplete/babel.config.js b/packages/kbn-securitysolution-autocomplete/babel.config.js
deleted file mode 100644
index b4a118df51af5..0000000000000
--- a/packages/kbn-securitysolution-autocomplete/babel.config.js
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-
-module.exports = {
- env: {
- web: {
- presets: ['@kbn/babel-preset/webpack_preset'],
- },
- node: {
- presets: ['@kbn/babel-preset/node_preset'],
- },
- },
- ignore: ['**/*.test.ts', '**/*.test.tsx'],
-};
diff --git a/packages/kbn-securitysolution-autocomplete/react/package.json b/packages/kbn-securitysolution-autocomplete/react/package.json
deleted file mode 100644
index c5f222b5843ac..0000000000000
--- a/packages/kbn-securitysolution-autocomplete/react/package.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "browser": "../target_web/react",
- "main": "../target_node/react",
- "types": "../target_types/react/index.d.ts"
-}
diff --git a/packages/kbn-securitysolution-autocomplete/tsconfig.browser.json b/packages/kbn-securitysolution-autocomplete/tsconfig.browser.json
deleted file mode 100644
index 404043569aa92..0000000000000
--- a/packages/kbn-securitysolution-autocomplete/tsconfig.browser.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "extends": "../../tsconfig.browser_bazel.json",
- "compilerOptions": {
- "allowJs": true,
- "outDir": "./target_web",
- "declaration": false,
- "isolatedModules": true,
- "sourceMap": true,
- "sourceRoot": "../../../../../packages/kbn-securitysolution-autocomplete/src",
- "types": [
- "jest",
- "node"
- ],
- },
- "include": [
- "src/**/*.ts",
- "src/**/*.tsx",
- ],
- "exclude": [
- "**/__fixtures__/**/*"
- ]
-}
diff --git a/packages/kbn-securitysolution-autocomplete/tsconfig.json b/packages/kbn-securitysolution-autocomplete/tsconfig.json
index 484b639f94332..fa7eff8234011 100644
--- a/packages/kbn-securitysolution-autocomplete/tsconfig.json
+++ b/packages/kbn-securitysolution-autocomplete/tsconfig.json
@@ -1,15 +1,14 @@
{
"extends": "../../tsconfig.bazel.json",
"compilerOptions": {
- "allowJs": true,
- "declarationDir": "./target_types",
- "outDir": "target_node",
"declaration": true,
"declarationMap": true,
+ "emitDeclarationOnly": true,
+ "outDir": "target_types",
"sourceMap": true,
"sourceRoot": "../../../../packages/kbn-securitysolution-autocomplete/src",
"rootDir": "src",
"types": ["jest", "node", "resize-observer-polyfill"]
},
- "include": ["src/**/*"]
+ "include": ["src/**/*"],
}
diff --git a/packages/kbn-securitysolution-utils/.babelrc b/packages/kbn-securitysolution-utils/.babelrc
new file mode 100644
index 0000000000000..40a198521b903
--- /dev/null
+++ b/packages/kbn-securitysolution-utils/.babelrc
@@ -0,0 +1,4 @@
+{
+ "presets": ["@kbn/babel-preset/node_preset"],
+ "ignore": ["**/*.test.ts", "**/*.test.tsx"]
+}
diff --git a/packages/kbn-securitysolution-utils/BUILD.bazel b/packages/kbn-securitysolution-utils/BUILD.bazel
index 41fb97bc6079e..c3d6b92044ef6 100644
--- a/packages/kbn-securitysolution-utils/BUILD.bazel
+++ b/packages/kbn-securitysolution-utils/BUILD.bazel
@@ -1,5 +1,6 @@
load("@npm//@bazel/typescript:index.bzl", "ts_config", "ts_project")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library", "pkg_npm")
+load("//src/dev/bazel:index.bzl", "jsts_transpiler")
PKG_BASE_NAME = "kbn-securitysolution-utils"
@@ -27,18 +28,23 @@ NPM_MODULE_EXTRA_FILES = [
"README.md",
]
-SRC_DEPS = [
+RUNTIME_DEPS = [
"@npm//tslib",
"@npm//uuid",
]
TYPES_DEPS = [
+ "@npm//tslib",
"@npm//@types/jest",
"@npm//@types/node",
"@npm//@types/uuid"
]
-DEPS = SRC_DEPS + TYPES_DEPS
+jsts_transpiler(
+ name = "target_node",
+ srcs = SRCS,
+ build_pkg_name = package_name(),
+)
ts_config(
name = "tsconfig",
@@ -50,24 +56,25 @@ ts_config(
)
ts_project(
- name = "tsc",
- srcs = SRCS,
+ name = "tsc_types",
args = ["--pretty"],
+ srcs = SRCS,
+ deps = TYPES_DEPS,
declaration = True,
declaration_map = True,
- out_dir = "target",
+ emit_declaration_only = True,
+ out_dir = "target_types",
root_dir = "src",
source_map = True,
tsconfig = ":tsconfig",
- deps = DEPS,
)
js_library(
name = PKG_BASE_NAME,
- package_name = PKG_REQUIRE_NAME,
srcs = NPM_MODULE_EXTRA_FILES,
+ deps = RUNTIME_DEPS + [":target_node", ":tsc_types"],
+ package_name = PKG_REQUIRE_NAME,
visibility = ["//visibility:public"],
- deps = DEPS + [":tsc"],
)
pkg_npm(
diff --git a/packages/kbn-securitysolution-utils/package.json b/packages/kbn-securitysolution-utils/package.json
index d4b46ed07bfdd..98f19e33d379b 100644
--- a/packages/kbn-securitysolution-utils/package.json
+++ b/packages/kbn-securitysolution-utils/package.json
@@ -3,7 +3,7 @@
"version": "1.0.0",
"description": "security solution utilities to use across plugins such lists, security_solution, cases, etc...",
"license": "SSPL-1.0 OR Elastic License 2.0",
- "main": "./target/index.js",
- "types": "./target/index.d.ts",
+ "main": "./target_node/index.js",
+ "types": "./target_types/index.d.ts",
"private": true
}
diff --git a/packages/kbn-securitysolution-utils/tsconfig.json b/packages/kbn-securitysolution-utils/tsconfig.json
index 3894b53d6cff3..23fdf3178e174 100644
--- a/packages/kbn-securitysolution-utils/tsconfig.json
+++ b/packages/kbn-securitysolution-utils/tsconfig.json
@@ -3,7 +3,8 @@
"compilerOptions": {
"declaration": true,
"declarationMap": true,
- "outDir": "target",
+ "emitDeclarationOnly": true,
+ "outDir": "target_types",
"rootDir": "src",
"sourceMap": true,
"sourceRoot": "../../../../packages/kbn-securitysolution-utils/src",
diff --git a/packages/kbn-test/jest-preset.js b/packages/kbn-test/jest-preset.js
index a52de37fb2008..0199aa6e311b6 100644
--- a/packages/kbn-test/jest-preset.js
+++ b/packages/kbn-test/jest-preset.js
@@ -103,11 +103,11 @@ module.exports = {
// An array of regexp pattern strings that are matched against all source file paths, matched files to include/exclude for code coverage
collectCoverageFrom: [
'**/*.{js,mjs,jsx,ts,tsx}',
- '!**/{__test__,__snapshots__,__examples__,mocks,tests,test_helpers,integration_tests,types}/**/*',
- '!**/*mock*.ts',
- '!**/*.test.ts',
+ '!**/{__test__,__snapshots__,__examples__,*mock*,tests,test_helpers,integration_tests,types}/**/*',
+ '!**/*mock*.{ts,tsx}',
+ '!**/*.test.{ts,tsx}',
'!**/*.d.ts',
- '!**/index.{js,ts}',
+ '!**/index.{js,ts,tsx}',
],
// A custom resolver to preserve symlinks by default
diff --git a/packages/kbn-test/src/jest/setup/babel_polyfill.js b/packages/kbn-test/src/jest/setup/babel_polyfill.js
index 7dda4cceec65c..7981eb668f38f 100644
--- a/packages/kbn-test/src/jest/setup/babel_polyfill.js
+++ b/packages/kbn-test/src/jest/setup/babel_polyfill.js
@@ -9,4 +9,4 @@
// Note: In theory importing the polyfill should not be needed, as Babel should
// include the necessary polyfills when using `@babel/preset-env`, but for some
// reason it did not work. See https://github.com/elastic/kibana/issues/14506
-import '@kbn/optimizer/target/node/polyfill';
+import '@kbn/optimizer/target_node/node/polyfill';
diff --git a/packages/kbn-typed-react-router-config/src/create_router.test.tsx b/packages/kbn-typed-react-router-config/src/create_router.test.tsx
index d8f42c8714e8b..3fb37f813e2e1 100644
--- a/packages/kbn-typed-react-router-config/src/create_router.test.tsx
+++ b/packages/kbn-typed-react-router-config/src/create_router.test.tsx
@@ -201,6 +201,21 @@ describe('createRouter', () => {
},
});
});
+
+ it('supports multiple paths', () => {
+ history.push('/service-map?rangeFrom=now-15m&rangeTo=now&maxNumNodes=3');
+
+ const params = router.getParams('/services', '/service-map', history.location);
+
+ expect(params).toEqual({
+ path: {},
+ query: {
+ maxNumNodes: 3,
+ rangeFrom: 'now-15m',
+ rangeTo: 'now',
+ },
+ });
+ });
});
describe('matchRoutes', () => {
diff --git a/packages/kbn-typed-react-router-config/src/create_router.ts b/packages/kbn-typed-react-router-config/src/create_router.ts
index 28f9e2774eb74..370d8b48e53b4 100644
--- a/packages/kbn-typed-react-router-config/src/create_router.ts
+++ b/packages/kbn-typed-react-router-config/src/create_router.ts
@@ -9,6 +9,7 @@ import { isLeft } from 'fp-ts/lib/Either';
import { Location } from 'history';
import { PathReporter } from 'io-ts/lib/PathReporter';
import {
+ MatchedRoute,
matchRoutes as matchRoutesConfig,
RouteConfig as ReactRouterConfig,
} from 'react-router-config';
@@ -49,33 +50,44 @@ export function createRouter(routes: TRoutes): Router {
- let path: string = args[0];
- let location: Location = args[1];
- let optional: boolean = args[2];
-
- if (args.length === 1) {
- location = args[0] as Location;
- path = location.pathname;
- optional = args[1];
+ let optional: boolean = false;
+
+ if (typeof args[args.length - 1] === 'boolean') {
+ optional = args[args.length - 1];
+ args.pop();
}
- const greedy = path.endsWith('/*') || args.length === 1;
+ const location: Location = args[args.length - 1];
+ args.pop();
+
+ let paths: string[] = args;
- if (!path) {
- path = '/';
+ if (paths.length === 0) {
+ paths = [location.pathname || '/'];
}
- const matches = matchRoutesConfig(reactRouterConfigs, location.pathname);
+  let matches: Array<MatchedRoute<{}, string>> = [];
+ let matchIndex: number = -1;
- const matchIndex = greedy
- ? matches.length - 1
- : findLastIndex(matches, (match) => match.route.path === path);
+ for (const path of paths) {
+ const greedy = path.endsWith('/*') || args.length === 0;
+ matches = matchRoutesConfig(reactRouterConfigs, location.pathname);
+
+ matchIndex = greedy
+ ? matches.length - 1
+ : findLastIndex(matches, (match) => match.route.path === path);
+
+ if (matchIndex !== -1) {
+ break;
+ }
+ matchIndex = -1;
+ }
if (matchIndex === -1) {
if (optional) {
return [];
}
- throw new Error(`No matching route found for ${path}`);
+ throw new Error(`No matching route found for ${paths}`);
}
return matches.slice(0, matchIndex + 1).map((matchedRoute) => {
diff --git a/packages/kbn-typed-react-router-config/src/types/index.ts b/packages/kbn-typed-react-router-config/src/types/index.ts
index 0e02318c50aad..4d26d2879d5e7 100644
--- a/packages/kbn-typed-react-router-config/src/types/index.ts
+++ b/packages/kbn-typed-react-router-config/src/types/index.ts
@@ -134,6 +134,22 @@ export interface Router {
location: Location,
optional: TOptional
): TOptional extends true ? TypeOf<TRoutes, TPath> | undefined : TypeOf<TRoutes, TPath>;
+  getParams<T1 extends PathsOf<TRoutes>, T2 extends PathsOf<TRoutes>>(
+    path1: T1,
+    path2: T2,
+    location: Location
+  ): TypeOf<TRoutes, T1> | TypeOf<TRoutes, T2>;
+  getParams<T1 extends PathsOf<TRoutes>, T2 extends PathsOf<TRoutes>, T3 extends PathsOf<TRoutes>>(
+    path1: T1,
+    path2: T2,
+    path3: T3,
+    location: Location
+  ): TypeOf<TRoutes, T1> | TypeOf<TRoutes, T2> | TypeOf<TRoutes, T3>;
+  getParams<TPath extends PathsOf<TRoutes>, TOptional extends boolean>(
+    path: TPath,
+    location: Location,
+    optional: TOptional
+  ): TOptional extends true ? TypeOf<TRoutes, TPath> | undefined : TypeOf<TRoutes, TPath>;
  link<TPath extends PathsOf<TRoutes>>(
    path: TPath,
    ...args: TypeAsArgs<TypeOf<TRoutes, TPath>>
diff --git a/packages/kbn-typed-react-router-config/src/use_params.ts b/packages/kbn-typed-react-router-config/src/use_params.ts
index 94a5cf401c569..0468eb9566236 100644
--- a/packages/kbn-typed-react-router-config/src/use_params.ts
+++ b/packages/kbn-typed-react-router-config/src/use_params.ts
@@ -6,12 +6,26 @@
* Side Public License, v 1.
*/
+import { Location } from 'history';
import { useLocation } from 'react-router-dom';
import { useRouter } from './use_router';
-export function useParams(path: string, optional: boolean = false) {
+export function useParams(...args: any[]) {
const router = useRouter();
const location = useLocation();
- return router.getParams(path as never, location, optional);
+ let optional: boolean = false;
+
+ const last: boolean | string | undefined = args[args.length - 1];
+
+ if (typeof last === 'boolean') {
+ optional = last;
+ args.pop();
+ }
+
+ const paths = args as string[];
+
+ const getParamsArgs = [...paths, location, optional] as [never, Location, boolean];
+
+ return router.getParams(...getParamsArgs);
}
diff --git a/packages/kbn-ui-shared-deps/.babelrc b/packages/kbn-ui-shared-deps/.babelrc
new file mode 100644
index 0000000000000..7da72d1779128
--- /dev/null
+++ b/packages/kbn-ui-shared-deps/.babelrc
@@ -0,0 +1,3 @@
+{
+ "presets": ["@kbn/babel-preset/node_preset"]
+}
diff --git a/packages/kbn-ui-shared-deps/BUILD.bazel b/packages/kbn-ui-shared-deps/BUILD.bazel
index 352fd48907345..8bc9555e640b5 100644
--- a/packages/kbn-ui-shared-deps/BUILD.bazel
+++ b/packages/kbn-ui-shared-deps/BUILD.bazel
@@ -1,6 +1,7 @@
load("@npm//@bazel/typescript:index.bzl", "ts_config", "ts_project")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library", "pkg_npm")
load("@npm//webpack-cli:index.bzl", webpack = "webpack_cli")
+load("//src/dev/bazel:index.bzl", "jsts_transpiler")
PKG_BASE_NAME = "kbn-ui-shared-deps"
PKG_REQUIRE_NAME = "@kbn/ui-shared-deps"
@@ -28,7 +29,7 @@ NPM_MODULE_EXTRA_FILES = [
"README.md"
]
-SRC_DEPS = [
+RUNTIME_DEPS = [
"//packages/elastic-datemath",
"//packages/elastic-safer-lodash-set",
"//packages/kbn-analytics",
@@ -71,10 +72,53 @@ SRC_DEPS = [
]
TYPES_DEPS = [
+ "//packages/elastic-datemath",
+ "//packages/elastic-safer-lodash-set",
+ "//packages/kbn-analytics",
+ "//packages/kbn-babel-preset",
+ "//packages/kbn-i18n",
+ "//packages/kbn-monaco",
+ "//packages/kbn-std",
+ "//packages/kbn-utils",
+ "@npm//@elastic/charts",
+ "@npm//@elastic/eui",
+ "@npm//@elastic/numeral",
+ "@npm//@emotion/react",
+ "@npm//abortcontroller-polyfill",
+ "@npm//angular",
+ "@npm//babel-loader",
+ "@npm//core-js",
+ "@npm//css-loader",
+ "@npm//fflate",
+ "@npm//jquery",
+ "@npm//loader-utils",
+ "@npm//mini-css-extract-plugin",
+ "@npm//moment",
+ "@npm//moment-timezone",
+ "@npm//raw-loader",
+ "@npm//react",
+ "@npm//react-dom",
+ "@npm//react-intl",
+ "@npm//react-is",
+ "@npm//react-router",
+ "@npm//react-router-dom",
+ "@npm//regenerator-runtime",
+ "@npm//resize-observer-polyfill",
+ "@npm//rison-node",
+ "@npm//rxjs",
+ "@npm//styled-components",
+ "@npm//symbol-observable",
+ "@npm//url-loader",
+ "@npm//val-loader",
+ "@npm//whatwg-fetch",
"@npm//@types/node",
]
-DEPS = SRC_DEPS + TYPES_DEPS
+jsts_transpiler(
+ name = "target_node",
+ srcs = SRCS,
+ build_pkg_name = package_name(),
+)
ts_config(
name = "tsconfig",
@@ -86,22 +130,23 @@ ts_config(
)
ts_project(
- name = "tsc",
+ name = "tsc_types",
args = ['--pretty'],
srcs = SRCS,
- deps = DEPS,
+ deps = TYPES_DEPS,
allow_js = True,
declaration = True,
declaration_map = True,
- out_dir = "target",
- source_map = True,
+ emit_declaration_only = True,
+ out_dir = "target_types",
root_dir = "src",
+ source_map = True,
tsconfig = ":tsconfig",
)
webpack(
name = "shared_built_assets",
- data = DEPS + [
+ data = RUNTIME_DEPS + [
"//:package.json",
":srcs",
":tsconfig",
@@ -120,7 +165,7 @@ webpack(
js_library(
name = PKG_BASE_NAME,
srcs = NPM_MODULE_EXTRA_FILES,
- deps = DEPS + [":tsc", ":shared_built_assets"],
+ deps = RUNTIME_DEPS + [":target_node", ":tsc_types", ":shared_built_assets"],
package_name = PKG_REQUIRE_NAME,
visibility = ["//visibility:public"],
)
diff --git a/packages/kbn-ui-shared-deps/flot_charts/package.json b/packages/kbn-ui-shared-deps/flot_charts/package.json
index 03d7ac348fcb9..6c2f62447daf5 100644
--- a/packages/kbn-ui-shared-deps/flot_charts/package.json
+++ b/packages/kbn-ui-shared-deps/flot_charts/package.json
@@ -1,4 +1,4 @@
{
- "main": "../target/flot_charts/index.js",
- "types": "../target/flot_charts/index.d.ts"
+ "main": "../target_node/flot_charts/index.js",
+ "types": "../target_types/flot_charts/index.d.ts"
}
\ No newline at end of file
diff --git a/packages/kbn-ui-shared-deps/package.json b/packages/kbn-ui-shared-deps/package.json
index 5ec32ca059aa1..f360d37db11c8 100644
--- a/packages/kbn-ui-shared-deps/package.json
+++ b/packages/kbn-ui-shared-deps/package.json
@@ -3,6 +3,6 @@
"version": "1.0.0",
"private": true,
"license": "SSPL-1.0 OR Elastic License 2.0",
- "main": "target/index.js",
- "types": "target/index.d.ts"
+ "main": "target_node/index.js",
+ "types": "target_types/index.d.ts"
}
\ No newline at end of file
diff --git a/packages/kbn-ui-shared-deps/theme/package.json b/packages/kbn-ui-shared-deps/theme/package.json
index 2d41937701a29..37d60f83b18e9 100644
--- a/packages/kbn-ui-shared-deps/theme/package.json
+++ b/packages/kbn-ui-shared-deps/theme/package.json
@@ -1,4 +1,4 @@
{
- "main": "../target/theme.js",
- "types": "../target/theme.d.ts"
+ "main": "../target_node/theme.js",
+ "types": "../target_types/theme.d.ts"
}
\ No newline at end of file
diff --git a/packages/kbn-ui-shared-deps/tsconfig.json b/packages/kbn-ui-shared-deps/tsconfig.json
index 90a89ac580a40..81a8a6b200ada 100644
--- a/packages/kbn-ui-shared-deps/tsconfig.json
+++ b/packages/kbn-ui-shared-deps/tsconfig.json
@@ -2,9 +2,10 @@
"extends": "../../tsconfig.bazel.json",
"compilerOptions": {
"allowJs": true,
- "outDir": "./target/types",
"declaration": true,
"declarationMap": true,
+ "emitDeclarationOnly": true,
+ "outDir": "./target_types",
"rootDir": "src",
"sourceMap": true,
"sourceRoot": "../../../../packages/kbn-ui-shared-deps/src",
diff --git a/rfcs/text/0013_saved_object_migrations.md b/rfcs/text/0013_saved_object_migrations.md
new file mode 100644
index 0000000000000..2f7ed796bf0e6
--- /dev/null
+++ b/rfcs/text/0013_saved_object_migrations.md
@@ -0,0 +1,824 @@
+- Start Date: 2020-05-11
+- RFC PR: (leave this empty)
+- Kibana Issue: (leave this empty)
+
+---
+- [1. Summary](#1-summary)
+- [2. Motivation](#2-motivation)
+- [3. Saved Object Migration Errors](#3-saved-object-migration-errors)
+- [4. Design](#4-design)
+ - [4.0 Assumptions and tradeoffs](#40-assumptions-and-tradeoffs)
+ - [4.1 Discover and remedy potential failures before any downtime](#41-discover-and-remedy-potential-failures-before-any-downtime)
+ - [4.2 Automatically retry failed migrations until they succeed](#42-automatically-retry-failed-migrations-until-they-succeed)
+ - [4.2.1 Idempotent migrations performed without coordination](#421-idempotent-migrations-performed-without-coordination)
+ - [4.2.1.1 Restrictions](#4211-restrictions)
+ - [4.2.1.2 Migration algorithm: Cloned index per version](#4212-migration-algorithm-cloned-index-per-version)
+ - [Known weaknesses:](#known-weaknesses)
+ - [4.2.1.3 Upgrade and rollback procedure](#4213-upgrade-and-rollback-procedure)
+ - [4.2.1.4 Handling documents that belong to a disabled plugin](#4214-handling-documents-that-belong-to-a-disabled-plugin)
+- [5. Alternatives](#5-alternatives)
+ - [5.1 Rolling upgrades](#51-rolling-upgrades)
+ - [5.2 Single node migrations coordinated through a lease/lock](#52-single-node-migrations-coordinated-through-a-leaselock)
+ - [5.2.1 Migration algorithm](#521-migration-algorithm)
+ - [5.2.2 Document lock algorithm](#522-document-lock-algorithm)
+ - [5.2.3 Checking for "weak lease" expiry](#523-checking-for-weak-lease-expiry)
+ - [5.3 Minimize data loss with mixed Kibana versions during 7.x](#53-minimize-data-loss-with-mixed-kibana-versions-during-7x)
+ - [5.4 In-place migrations that re-use the same index (8.0)](#54-in-place-migrations-that-re-use-the-same-index-80)
+ - [5.4.1 Migration algorithm (8.0):](#541-migration-algorithm-80)
+ - [5.4.2 Minimizing data loss with unsupported upgrade configurations (8.0)](#542-minimizing-data-loss-with-unsupported-upgrade-configurations-80)
+ - [5.5 Tag objects as “invalid” if their transformation fails](#55-tag-objects-as-invalid-if-their-transformation-fails)
+- [6. How we teach this](#6-how-we-teach-this)
+- [7. Unresolved questions](#7-unresolved-questions)
+
+# 1. Summary
+
+Improve the Saved Object migration algorithm to ensure a smooth Kibana upgrade
+procedure.
+
+# 2. Motivation
+
+Kibana version upgrades should have a minimal operational impact. To achieve
+this, users should be able to rely on:
+
+1. A predictable downtime window.
+2. A small downtime window.
+ 1. (future) provide a small downtime window on indices with 10k or even
+     100k documents.
+3. The ability to discover and remedy potential failures before initiating the
+ downtime window.
+4. Quick roll-back in case of failure.
+5. Detailed documentation about the impact of downtime on the features they
+ are using (e.g. actions, task manager, fleet, reporting).
+6. Mixed Kibana versions shouldn’t cause data loss.
+7. (stretch goal) Maintain read-only functionality during the downtime window.
+
+The biggest hurdle to achieving the above is Kibana’s Saved Object migrations.
+Migrations aren’t resilient and require manual intervention anytime an error
+occurs (see [3. Saved Object Migration
+Errors](#3-saved-object-migration-errors)).
+
+It is impossible to discover these failures before initiating downtime. Errors
+often force users to roll-back to a previous version of Kibana or cause hours
+of downtime. To retry the migration, users are asked to manually delete a
+`.kibana_x` index. If done incorrectly, this can lead to data loss, making it a
+terrifying experience (restoring from a pre-upgrade snapshot is a safer
+alternative but not mentioned in the docs or logs).
+
+Cloud users don’t have access to Kibana logs, so they cannot identify and
+remedy the cause of a migration failure. Apart from blindly retrying migrations
+by restoring a previous snapshot, cloud users are unable to remedy a failed
+migration and have to escalate to support, which can further delay resolution.
+
+Taken together, version upgrades are a major operational risk and discourage
+users from adopting the latest features.
+
+# 3. Saved Object Migration Errors
+
+Any of the following classes of errors could result in a Saved Object
+migration failure which requires manual intervention to resolve:
+
+1. A bug in a plugin’s registered document transformation function causes it
+ to throw an exception on _valid_ data.
+2. _Invalid_ data stored in Elasticsearch causes a plugin’s registered
+   document transformation function to throw an exception.
+3. Failures resulting from an unhealthy Elasticsearch cluster:
+ 1. Maximum shards open
+ 2. Too many scroll contexts
+ 3. `circuit_breaking_exception` (insufficient heap memory)
+ 4. `process_cluster_event_timeout_exception` for index-aliases, create-index, put-mappings
+ 5. Read-only indices due to low disk space (hitting the flood_stage watermark)
+ 6. Re-index failed: search rejected due to missing shards
+ 7. `TooManyRequests` while doing a `count` of documents requiring a migration
+ 8. Bulk write failed: primary shard is not active
+4. The Kibana process is killed while migrations are in progress.
+
+# 4. Design
+
+## 4.0 Assumptions and tradeoffs
+
+The proposed design makes several important assumptions and tradeoffs.
+
+**Background:**
+
+The 7.x upgrade documentation lists taking an Elasticsearch snapshot as a
+required step, but we instruct users to retry migrations and perform rollbacks
+by deleting the failed `.kibana_n` index and pointing the `.kibana` alias to
+`.kibana_n-1`:
+ - [Handling errors during saved object
+migrations.](https://github.com/elastic/kibana/blob/75444a9f1879c5702f9f2b8ad4a70a3a0e75871d/docs/setup/upgrade/upgrade-migrations.asciidoc#handling-errors-during-saved-object-migrations)
+ - [Rolling back to a previous version of Kibana.](https://github.com/elastic/kibana/blob/75444a9f1879c5702f9f2b8ad4a70a3a0e75871d/docs/setup/upgrade/upgrade-migrations.asciidoc#rolling-back-to-a-previous-version-of-kib)
+ - Server logs from failed migrations.
+
+**Assumptions and tradeoffs:**
+1. It is critical to maintain a backup index during 7.x to ensure that anyone
+   following the existing upgrade / rollback procedures doesn't end up in a
+   position where they can no longer recover their data.
+ 1. This excludes us from introducing in-place migrations to support huge
+ indices during 7.x.
+2. The simplicity of idempotent, coordination-free migrations outweighs the
+ restrictions this will impose on the kinds of migrations we're able to
+   support in the future. See (4.2.1).
+3. A saved object type (and its associated migrations) will only ever be
+ owned by one plugin. If pluginA registers saved object type `plugin_a_type`
+ then pluginB must never register that same type, even if pluginA is
+ disabled. Although we cannot enforce it on third-party plugins, breaking
+ this assumption may lead to data loss.
+
+## 4.1 Discover and remedy potential failures before any downtime
+
+> Achieves goals: (2.3)
+> Mitigates errors: (3.1), (3.2)
+
+1. Introduce a CLI option to perform a dry run migration to allow
+ administrators to locate and fix potential migration failures without
+ taking their existing Kibana node(s) offline.
+2. To have the highest chance of surfacing potential failures such as low disk
+ space, dry run migrations should not be mere simulations. A dry run should
+ perform a real migration in a way that doesn’t impact the existing Kibana
+ cluster.
+3. The CLI should generate a migration report to make it easy to create a
+ support request from a failed migration dry run.
+ 1. The report would be an NDJSON export of all failed objects.
+ 2. If support receives such a report, we could modify all the objects to
+ ensure the migration would pass and send this back to the client.
+ 3. The client can then import the updated objects using the standard Saved
+ Objects NDJSON import and run another dry run to verify all problems
+ have been fixed.
+4. Make running dry run migrations a required step in the upgrade procedure
+ documentation.
+5. (Optional) Add dry run migrations to the standard cloud upgrade procedure?
+
+## 4.2 Automatically retry failed migrations until they succeed
+
+> Achieves goals: (2.2), (2.6)
+> Mitigates errors: (3.3) and (3.4)
+
+External conditions such as failures from an unhealthy Elasticsearch cluster
+(3.3) can cause the migration to fail. The Kibana cluster should be able to
+recover automatically once these external conditions are resolved. There are
+two broad approaches to solving this problem based on whether or not
+migrations are idempotent:
+
+| Idempotent migrations | Description                                                |
+| ---------------------- | ---------------------------------------------------------- |
+| Yes                    | Idempotent migrations performed without coordination       |
+| No                     | Single node migrations coordinated through a lease / lock  |
+
+Idempotent migrations don't require coordination making the algorithm
+significantly less complex and will never require manual intervention to
+retry. We, therefore, prefer this solution, even though it introduces
+restrictions on migrations (4.2.1.1). For other alternatives that were
+considered, see section [(5)](#5-alternatives).
+
+## 4.2.1 Idempotent migrations performed without coordination
+
+The migration system can be said to be idempotent if the same results are
+produced whether the migration was run once or multiple times. This property
+should hold even if new (up to date) writes occur in between migration runs,
+which introduces the following restrictions:
+
+### 4.2.1.1 Restrictions
+
+1. All document transforms need to be deterministic, that is, a document
+ transform will always return the same result for the same set of inputs.
+2. It should always be possible to construct the exact set of inputs required
+ for (1) at any point during the migration process (before, during, after).
+
+Although these restrictions require significant changes, they do not prevent
+known upcoming migrations such as [sharing saved-objects in multiple spaces](https://github.com/elastic/kibana/issues/27004) or [splitting a saved
+object into multiple child
+documents](https://github.com/elastic/kibana/issues/26602). To ensure that
+these migrations are idempotent, they will have to generate new saved object
+ids deterministically with e.g. UUIDv5.
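+
+As a rough sketch (not normative for this RFC), deterministic id generation
+could look like the following, assuming the `uuid` package and a hypothetical
+fixed namespace constant:
+
+```
+import { v5 as uuidv5 } from 'uuid';
+
+// Hypothetical namespace; the only requirement is that it never changes.
+const SAVED_OBJECT_NAMESPACE = '4f2b1cde-03b7-4f5d-9a3b-7a7f0e6c9f1d';
+
+// The same (parentId, childKey) input always yields the same child id, so
+// re-running the migration cannot create duplicate child documents.
+function deterministicChildId(parentId: string, childKey: string): string {
+  return uuidv5(`${parentId}:${childKey}`, SAVED_OBJECT_NAMESPACE);
+}
+```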
+
+
+### 4.2.1.2 Migration algorithm: Cloned index per version
+Note:
+- The description below assumes the migration algorithm is released in 7.10.0.
+ So >= 7.10.0 will use the new algorithm.
+- We refer to the alias and index that outdated nodes use as the source alias
+ and source index.
+- Every version performs a migration even if mappings or documents aren't outdated.
+
+1. Locate the source index by fetching kibana indices:
+
+ ```
+   GET '/.kibana,.kibana_7.10.0?ignore_unavailable=true'
+ ```
+
+ The source index is:
+ 1. the index the `.kibana` alias points to, or if it doesn't exist,
+ 2. the v6.x `.kibana` index
+
+ If none of the aliases exists, this is a new Elasticsearch cluster and no
+ migrations are necessary. Create the `.kibana_7.10.0_001` index with the
+ following aliases: `.kibana` and `.kibana_7.10.0`.
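+
+   For the new-cluster case this could be a single create index request, e.g.:
+
+   ```
+   PUT /.kibana_7.10.0_001
+   {
+     "aliases": {
+       ".kibana": {},
+       ".kibana_7.10.0": {}
+     }
+   }
+   ```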
+2. If the source is a < v6.5 `.kibana` index or < 7.4 `.kibana_task_manager`
+   index, prepare the legacy index for a migration:
+   1. Mark the legacy index as read-only and wait for all in-flight operations to drain (requires https://github.com/elastic/elasticsearch/pull/58094). This prevents any further writes from outdated nodes. Assuming this API is similar to the existing `/{index}/_close` API, we expect to receive `"acknowledged" : true` and `"shards_acknowledged" : true`. If all shards don’t acknowledge within the timeout, retry the operation until it succeeds.
+ 2. Create a new index which will become the source index after the legacy
+ pre-migration is complete. This index should have the same mappings as
+ the legacy index. Use a fixed index name i.e `.kibana_pre6.5.0_001` or
+ `.kibana_task_manager_pre7.4.0_001`. Ignore index already exists errors.
+ 3. Reindex the legacy index into the new source index with the
+ `convertToAlias` script if specified. Use `wait_for_completion: false`
+ to run this as a task. Ignore errors if the legacy source doesn't exist.
+ 4. Wait for the reindex task to complete. If the task doesn’t complete
+ within the 60s timeout, log a warning for visibility and poll again.
+ Ignore errors if the legacy source doesn't exist.
+   5. Delete the legacy index and replace it with an alias of the same name:
+      ```
+      POST /_aliases
+      {
+        "actions" : [
+          { "remove_index": { "index": ".kibana" } },
+          { "add": { "index": ".kibana_pre6.5.0_001", "alias": ".kibana" } }
+        ]
+      }
+      ```
+ Unlike the delete index API, the `remove_index` action will fail if
+ provided with an _alias_. Therefore, if another instance completed this
+ step, the `.kibana` alias won't be added to `.kibana_pre6.5.0_001` a
+ second time. This avoids a situation where `.kibana` could point to both
+ `.kibana_pre6.5.0_001` and `.kibana_7.10.0_001`. These actions are
+ applied atomically so that other Kibana instances will always see either
+ a `.kibana` index or an alias, but never neither.
+
+ Ignore "The provided expression [.kibana] matches an alias, specify the
+ corresponding concrete indices instead." or "index_not_found_exception"
+ errors as this means another instance has already completed this step.
+ 6. Use the reindexed legacy `.kibana_pre6.5.0_001` as the source for the rest of the migration algorithm.
+3. If `.kibana` and `.kibana_7.10.0` both exist and point to the same index, this version's migration has already been completed.
+ 1. Because the same version can have plugins enabled at any point in time,
+ migrate outdated documents with step (9) and perform the mappings update in step (10).
+ 2. Skip to step (12) to start serving traffic.
+4. Fail the migration if:
+   1. `.kibana` is pointing to an index that belongs to a later version of Kibana, e.g. `.kibana_7.12.0_001`
+ 2. (Only in 8.x) The source index contains documents that belong to an unknown Saved Object type (from a disabled plugin). Log an error explaining that the plugin that created these documents needs to be enabled again or that these objects should be deleted. See section (4.2.1.4).
+5. Set a write block on the source index. This prevents any further writes from outdated nodes.
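+
+   One way to set the block is the add index block API (available since
+   Elasticsearch 7.9), e.g.:
+
+   ```
+   PUT /.kibana_pre6.5.0_001/_block/write
+   ```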
+6. Create a new temporary index `.kibana_7.10.0_reindex_temp` with `dynamic: false` on the top-level mappings so that any kind of document can be written to the index. This allows us to write untransformed documents to the index which might have fields which have been removed from the latest mappings defined by the plugin. Define minimal mappings for the `migrationVersion` and `type` fields so that we're still able to search for outdated documents that need to be transformed.
+ 1. Ignore errors if the target index already exists.
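+
+   A sketch of such a request; only the `type` and `migrationVersion` fields
+   come from the text above, the rest is illustrative:
+
+   ```
+   PUT /.kibana_7.10.0_reindex_temp
+   {
+     "mappings": {
+       "dynamic": false,
+       "properties": {
+         "type": { "type": "keyword" },
+         "migrationVersion": { "type": "object", "dynamic": true }
+       }
+     }
+   }
+   ```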
+7. Reindex the source index into the new temporary index.
+ 1. Use `op_type=create` `conflicts=proceed` and `wait_for_completion=false` so that multiple instances can perform the reindex in parallel but only one write per document will succeed.
+ 2. Wait for the reindex task to complete. If reindexing doesn’t complete within the 60s timeout, log a warning for visibility and poll again.
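+
+   For example, using the source index name from step (2):
+
+   ```
+   POST /_reindex?wait_for_completion=false
+   {
+     "conflicts": "proceed",
+     "source": { "index": ".kibana_pre6.5.0_001" },
+     "dest": { "index": ".kibana_7.10.0_reindex_temp", "op_type": "create" }
+   }
+   ```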
+8. Clone the temporary index into the target index `.kibana_7.10.0_001`. Since any further writes will only happen against the cloned target index, this prevents a lost delete from occurring where one instance finishes the migration and deletes a document and another instance's reindex operation re-creates the deleted document.
+ 1. Set a write block on the temporary index
+ 2. Clone the temporary index into the target index while specifying that the target index should have writes enabled.
+ 3. If the clone operation fails because the target index already exist, ignore the error and wait for the target index to become green before proceeding.
+ 4. (The `001` postfix in the target index name isn't used by Kibana, but allows for re-indexing an index should this be required by an Elasticsearch upgrade. E.g. re-index `.kibana_7.10.0_001` into `.kibana_7.10.0_002` and point the `.kibana_7.10.0` alias to `.kibana_7.10.0_002`.)
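+ Steps (8.1) and (8.2) as Elasticsearch requests (a sketch):
+    ```
+    PUT /.kibana_7.10.0_reindex_temp/_settings
+    { "index.blocks.write": true }
+
+    POST /.kibana_7.10.0_reindex_temp/_clone/.kibana_7.10.0_001
+    { "settings": { "index.blocks.write": false } }
+    ```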
+9. Transform documents by reading batches of outdated documents from the target index then transforming and updating them with optimistic concurrency control.
+ 1. Ignore any version conflict errors.
+ 2. If a document transform throws an exception, add the document to a failure list and continue trying to transform all other documents. If any failures occurred, log the complete list of documents that failed to transform. Fail the migration.
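+ A hedged sketch of how a batch of outdated documents could be fetched for a
+ single type (the real query is generated from every registered type and its
+ latest migration version; `seq_no_primary_term` returns the values needed for
+ optimistic concurrency control):
+    ```
+    GET /.kibana_7.10.0_001/_search?seq_no_primary_term=true
+    {
+      "size": 1000,
+      "query": {
+        "bool": {
+          "must": { "term": { "type": "dashboard" } },
+          "must_not": { "term": { "migrationVersion.dashboard": "7.10.0" } }
+        }
+      }
+    }
+    ```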
+10. Update the mappings of the target index
+ 1. Retrieve the existing mappings including the `migrationMappingPropertyHashes` metadata.
+ 2. Update the mappings with `PUT /.kibana_7.10.0_001/_mapping`. The API deeply merges any updates so this won't remove the mappings of any plugins that are disabled on this instance but have been enabled on another instance that also migrated this index.
+ 3. Ensure that fields are correctly indexed using the target index's latest mappings `POST /.kibana_7.10.0_001/_update_by_query?conflicts=proceed`. In the future we could optimize this query by only targeting documents:
+ 1. That belong to a known saved object type.
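+ A sketch of steps (10.2) and (10.3); the elided mappings come from all
+ plugins enabled on this instance:
+    ```
+    PUT /.kibana_7.10.0_001/_mapping
+    {
+      "properties": { /* latest mappings from all enabled plugins */ },
+      "_meta": { "migrationMappingPropertyHashes": { /* ... */ } }
+    }
+
+    POST /.kibana_7.10.0_001/_update_by_query?conflicts=proceed
+    ```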
+11. Mark the migration as complete. This is done as a single atomic
+ operation (requires https://github.com/elastic/elasticsearch/pull/58100)
+ to guarantee that when multiple versions of Kibana are performing the
+ migration in parallel, only one version will win. E.g. if 7.11 and 7.12
+ are started in parallel and migrate from a 7.9 index, either 7.11 or 7.12
+ should succeed and accept writes, but not both.
+ 1. Check that the `.kibana` alias is still pointing to the source index.
+ 2. Point the `.kibana_7.10.0` and `.kibana` aliases to the target index.
+ 3. Remove the temporary index `.kibana_7.10.0_reindex_temp`
+ 4. If this fails with a "required alias [.kibana] does not exist" error or "index_not_found_exception" for the temporary index, fetch `.kibana` again:
+ 1. If `.kibana` is _not_ pointing to our target index fail the migration.
+ 2. If `.kibana` is pointing to our target index the migration has succeeded and we can proceed to step (12).
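+ Step (11) as a single atomic `_aliases` call (a sketch; using `must_exist`
+ on the remove action is an assumption about how the "required alias" error
+ in (11.4) is produced):
+    ```
+    POST /_aliases
+    {
+      "actions": [
+        { "remove": { "index": ".kibana_pre6.5.0_001", "alias": ".kibana", "must_exist": true } },
+        { "add": { "index": ".kibana_7.10.0_001", "alias": ".kibana" } },
+        { "add": { "index": ".kibana_7.10.0_001", "alias": ".kibana_7.10.0" } },
+        { "remove_index": { "index": ".kibana_7.10.0_reindex_temp" } }
+      ]
+    }
+    ```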
+12. Start serving traffic. All saved object reads/writes happen through the
+ version-specific alias `.kibana_7.10.0`.
+
+Together with the limitations, this algorithm ensures that migrations are
+idempotent. If two nodes are started simultaneously, both of them will start
+transforming documents in that version's target index, but because migrations
+are idempotent, it doesn’t matter which node’s writes win.
+
+#### Known weaknesses
+(Also present in our existing migration algorithm since v7.4)
+When the task manager index gets reindexed, a reindex script is applied.
+Because we delete the original task manager index, there is no way to roll back
+a failed task manager migration without a snapshot. However, losing the task
+manager data has a fairly low impact.
+
+(Also present in our existing migration algorithm since v6.5)
+If the outdated instance isn't shutdown before starting the migration, the
+following data-loss scenario is possible:
+1. Upgrade a 7.9 index without shutting down the 7.9 nodes
+2. Kibana v7.10 performs a migration and after completing points the `.kibana`
+   alias to `.kibana_7.10.0_001`.
+3. Kibana v7.9 writes unmigrated documents into `.kibana`.
+4. Kibana v7.10 performs a query based on the updated mappings of documents, so
+   results potentially don't match the acknowledged write from step (3).
+
+Note:
+ - Data loss won't occur if both nodes have the updated migration algorithm
+   proposed in this RFC. It is only when one of the nodes uses the existing
+   algorithm that data loss is possible.
+ - Once v7.10 is restarted it will transform any outdated documents making
+ these visible to queries again.
+
+It is possible to work around this weakness by introducing a new alias such as
+`.kibana_current` so that after a migration the `.kibana` alias will continue
+to point to the outdated index. However, we decided to keep using the
+`.kibana` alias despite this weakness for the following reasons:
+ - Users might rely on the `.kibana` alias for snapshots, so if this alias no
+   longer points to the latest index their snapshots would no longer back up
+   Kibana's latest data.
+ - Introducing another alias introduces complexity for users and support.
+   The steps to diagnose, fix or roll back a failed migration will deviate
+   depending on the 7.x version of Kibana you are using.
+ - The existing Kibana documentation clearly states that outdated nodes should
+   be shut down; this scenario has never been supported by Kibana.
+
+
+In the future, this algorithm could enable (2.6) "read-only functionality
+during the downtime window", but this is outside of the scope of this RFC.
+
+Although the migration algorithm guarantees there's no data loss while
+providing read-only access to outdated nodes, this could cause plugins to
+behave in unexpected ways. If we wish to pursue it in the future, enabling
+read-only functionality during the downtime window will be its own project and
+must include an audit of all plugins' behaviours.
+
+
+### 4.2.1.3 Upgrade and rollback procedure
+When a newer Kibana starts an upgrade, it blocks all writes to the outdated index to prevent data loss. Since Kibana is not designed to gracefully handle a read-only index, this could have unintended consequences, such as a task executing multiple times but never being able to record that the task completed successfully. To prevent unintended consequences, the following procedure should be followed when upgrading Kibana:
+
+1. Gracefully shut down outdated nodes by sending a `SIGTERM` signal:
+ 1. The node starts returning `503` from its health check endpoint to signal to
+ the load balancer that it's no longer accepting new traffic (requires https://github.com/elastic/kibana/issues/46984).
+ 2. It allows ongoing HTTP requests to complete with a configurable timeout
+ before forcefully terminating any open connections.
+ 3. It closes any keep-alive sockets by sending a `connection: close` header.
+ 4. It shuts down all plugins and Core services.
+2. (recommended) Take a snapshot of all Kibana's Saved Object indices. This reduces a rollback to a simple snapshot restore, but a snapshot is not required in order to roll back if a migration fails. (A sketch of such a snapshot request follows this list.)
+3. Start the upgraded Kibana nodes. All running Kibana nodes should be on the same version, have the same plugins enabled and use the same configuration.
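+A snapshot from step (2) could be taken with the snapshot API, for example
+(the repository name `my_backups` is hypothetical):
+```
+PUT /_snapshot/my_backups/pre_7.10.0_upgrade?wait_for_completion=true
+{ "indices": ".kibana*,.kibana_task_manager*" }
+```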
+
+To roll back to a previous version of Kibana with a snapshot:
+1. Shut down all Kibana nodes.
+2. Restore the Saved Object indices and aliases from the snapshot.
+3. Start the rollback Kibana nodes. All running Kibana nodes should be on the same rollback version, have the same plugins enabled and use the same configuration.
+
+To roll back to a previous version of Kibana without a snapshot
+(assuming the migration to 7.11.0 failed):
+1. Shut down all Kibana nodes.
+2. Remove the index created by the failed Kibana migration by using the version-specific alias, e.g. `DELETE /.kibana_7.11.0`.
+3. Remove the write block from the rollback index using the `.kibana` alias:
+   `PUT /.kibana/_settings {"index.blocks.write": false}`
+4. Start the rollback Kibana nodes. All running Kibana nodes should be on the same rollback version, have the same plugins enabled and use the same configuration.
+
+### 4.2.1.4 Handling documents that belong to a disabled plugin
+It is possible for a plugin to create documents in one version of Kibana, but then when upgrading Kibana to a newer version, that plugin is disabled. Because the plugin is disabled, it cannot register its Saved Object types, including the mappings or any migration transformation functions. These "orphan" documents could cause future problems:
+ - A major version introduces breaking mapping changes that cannot be applied to the data in these documents.
+ - Two majors later, migrations will no longer be able to migrate this old schema and could fail unexpectedly when the plugin is suddenly enabled.
+
+As a concrete example of the above, consider a user taking the following steps:
+1. Installs Kibana 7.6.0 with spaces=enabled. The spaces plugin creates a default space saved object.
+2. User upgrades to 7.10.0 but uses the OSS download which has spaces=disabled. Although the 7.10.0 spaces plugin includes a migration for space documents, the OSS release cannot migrate the documents or update its mappings.
+3. User realizes they made a mistake and uses Kibana 7.10.0 with X-Pack and the spaces plugin enabled. At this point we have a completed migration for 7.10.0 but there are outdated spaces documents with migrationVersion=7.6.0 instead of 7.10.0.
+
+There are several approaches we could take to dealing with these orphan documents:
+
+1. Start up but refuse to query on types with outdated documents until a user manually triggers a re-migration
+
+ Advantages:
+ - The impact is limited to a single plugin
+
+ Disadvantages:
+ - It might be less obvious that a plugin is in a degraded state unless you read the logs (not possible on Cloud) or view the `/status` endpoint.
+ - If a user doesn't care that the plugin is degraded, orphan documents are carried forward indefinitely.
+ - Since Kibana has started receiving traffic, users can no longer
+ downgrade without losing data. They have to re-migrate, but if that
+ fails they're stuck.
+ - Introduces a breaking change in the upgrade behaviour
+
+ To perform a re-migration:
+ - Remove the `.kibana_7.10.0` alias
+ - Take a snapshot OR set the configuration option `migrations.target_index_postfix: '002'` to create a new target index `.kibana_7.10.0_002` and keep the `.kibana_7.10.0_001` index to be able to perform a rollback.
+ - Start up Kibana
+
+2. Refuse to start Kibana until the plugin is enabled or its data is deleted
+
+ Advantages:
+ - Admins are forced to deal with the problem as soon as they disable a plugin
+
+ Disadvantages:
+ - Cannot temporarily disable a plugin to aid in debugging or to reduce the load a Kibana plugin places on an ES cluster.
+ - Introduces a breaking change
+
+3. Refuse to start a migration until the plugin is enabled or its data is deleted
+
+ Advantages:
+ - We force users to enable a plugin or delete the documents which prevents these documents from creating future problems like a mapping update not being compatible because there are fields which are assumed to have been migrated.
+ - We keep the index “clean”.
+
+ Disadvantages:
+ - Since users have to take down outdated nodes before they can start the upgrade, they have to enter the downtime window before they know about this problem. This prolongs the downtime window and in many cases might cause an operations team to have to reschedule their downtime window to give them time to investigate the documents that need to be deleted. Logging an error on every startup could warn users ahead of time to mitigate this.
+ - We don’t expose Kibana logs on Cloud so this will have to be escalated to support and could take 48hrs to resolve (users can safely rollback, but without visibility into the logs they might not know this). Exposing Kibana logs is on the cloud team’s roadmap.
+ - It might not be obvious just from the saved object type, which plugin created these objects.
+ - Introduces a breaking change in the upgrade behaviour
+
+4. Use a hash of enabled plugins as part of the target index name
+ Using a migration target index name like
+ `.kibana_7.10.0_${hash(enabled_plugins)}_001` we can migrate all documents
+ every time a plugin is enabled / disabled.
+
+ Advantages:
+ - Outdated documents belonging to disabled plugins will be upgraded as soon
+ as the plugin is enabled again.
+
+ Disadvantages:
+ - Disabling / enabling a plugin will cause downtime (breaking change).
+ - When a plugin is enabled, disabled and enabled again our target index
+ will be an existing outdated index which needs to be deleted and
+ re-cloned. Without a way to check if the index is outdated, we cannot
+ deterministically perform the delete and re-clone operation without
+ coordination.
+
+5. Transform outdated documents (step 9 of the migration algorithm) on every startup
+ Advantages:
+ - Outdated documents belonging to disabled plugins will be upgraded as soon
+ as the plugin is enabled again.
+
+ Disadvantages:
+ - Orphan documents are retained indefinitely so there's still a potential
+ for future problems.
+ - Slightly slower startup time since we have to query for outdated
+ documents every time.
+
+We prefer option (3) since it provides flexibility for disabling plugins in
+the same version while also protecting users' data in all cases during an
+upgrade migration. However, because this is a breaking change we will
+implement (5) during 7.x and only implement (3) during 8.x.
+
+# 5. Alternatives
+## 5.1 Rolling upgrades
+We considered implementing rolling upgrades to provide zero-downtime
+migrations. However, this would introduce significant complexity for plugins:
+they would need to maintain up and down migration transformations and ensure
+that queries match both current and outdated documents across all
+versions. Although we can afford the one-off complexity of implementing
+rolling upgrades, the complexity burden of maintaining plugins that support
+rolling upgrades would slow down all development in Kibana. Since a predictable
+downtime window is sufficient for our users, we decided against trying to
+achieve zero downtime with rolling upgrades. See "Rolling upgrades" in
+https://github.com/elastic/kibana/issues/52202 for more information.
+
+## 5.2 Single node migrations coordinated through a lease/lock
+This alternative is a proposed algorithm for coordinating migrations so that
+these only happen on a single node and therefore don't have the restrictions
+found in [(4.2.1.1)](#4311-restrictions). We decided against this algorithm
+primarily because it is a lot more complex, but also because it could still
+require manual intervention to retry from certain unlikely edge cases.
+
+> It's impossible to guarantee that a single node performs the
+> migration and automatically retry failed migrations.
+
+Coordination should ensure that only one Kibana node performs the migration at
+a given time, which can be achieved with a distributed lock built on top of
+Elasticsearch. For the Kibana cluster to be able to retry a failed migration,
+a specialized lock is required which expires after a given amount of
+inactivity. We will refer to such expiring locks as a "lease".
+
+If a Kibana process stalls, it is possible that the process' lease has expired
+but the process doesn't yet recognize this and continues the migration. To
+prevent this from causing data loss, each lease should be accompanied by a
+"guard" that prevents all writes after the lease has expired. See
+[how to do distributed
+locking](https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html)
+for an in-depth discussion.
+
+Elasticsearch doesn't provide any building blocks for constructing such a guard.
+
+However, we can implement a lock (that never expires) with strong
+data-consistency guarantees. Because there’s no expiration, a failure between
+obtaining the lock and releasing it will require manual intervention. Instead
+of trying to accomplish the entire migration after obtaining a lock, we can
+only perform the last step of the migration process, moving the aliases, with
+a lock. A permanent failure in only this last step is not impossible, but very
+unlikely.
+
+### 5.2.1 Migration algorithm
+1. Obtain a document lock (see [5.2.2 Document lock
+   algorithm](#522-document-lock-algorithm)). Convert the lock into a "weak
+   lease" by expiring locks for nodes which aren't active (see [5.2.3 Checking
+   for "weak lease" expiry](#523-checking-for-weak-lease-expiry)). This "weak
+   lease" doesn't require strict guarantees since it's only used to prevent
+   multiple Kibana nodes from performing a migration in parallel to reduce the
+   load on Elasticsearch.
+2. Migrate data into a new process-specific index (we could use the process
+ UUID that’s used in the lease document like
+ `.kibana_3ef25ff1-090a-4335-83a0-307a47712b4e`).
+3. Obtain a document lock (see [5.2.2 Document lock
+ algorithm](#522-document-lock-algorithm)).
+4. Finish the migration by pointing `.kibana` →
+ `.kibana_3ef25ff1-090a-4335-83a0-307a47712b4e`. This automatically releases
+ the document lock (and any leases) because the new index will contain an
+ empty `kibana_cluster_state`.
+
+If a process crashes or is stopped after (3) but before (4) the lock will have
+to be manually removed by deleting the `kibana_cluster_state` document from
+`.kibana` or restoring from a snapshot.
+
+### 5.2.2 Document lock algorithm
+To improve on the existing Saved Objects migrations lock, a locking algorithm
+needs to satisfy the following requirements:
+- Must guarantee that only a single node can obtain the lock. Since we can
+ only provide strong data-consistency guarantees on the document level in
+ Elasticsearch our locking mechanism needs to be based on a document.
+- Manually removing the lock
+ - shouldn't have any risk of accidentally causing data loss.
+ - can be done with a single command that's always the same (shouldn’t
+ require trying to find `n` for removing the correct `.kibana_n` index).
+- Must be easy to retrieve the lock/cluster state to aid in debugging or to
+ provide visibility.
+
+Algorithm:
+1. Node reads `kibana_cluster_state` lease document from `.kibana`
+2. It sends a heartbeat every `heartbeat_interval` seconds by sending an
+ update operation that adds its UUID to the `nodes` array and sets the
+ `lastSeen` value to the current local node time. If the update fails due to
+ a version conflict the update operation is retried after a random delay by
+ fetching the document again and attempting the update operation once more.
+3. To obtain a lease, a node:
+ 1. Fetches the `kibana_cluster_state` document
+ 2. If all the nodes' `hasLock === false`, it sets its own `hasLock` to
+ true and attempts to write the document. If the update fails
+ (presumably because of another node's heartbeat update) it restarts the
+ process to obtain a lease from step (3).
+ 3. If another node's `hasLock === true`, the node failed to acquire the
+ lock and waits until the active lock has expired before attempting to
+ obtain a lock again.
+4. Once a node is done with its lock, it releases it by fetching and then
+ updating `hasLock = false`. The fetch + update operations are retried until
+ this node’s `hasLock === false`.
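+
+A minimal sketch of step (3), assuming the `@elastic/elasticsearch` client and
+the `kibana_cluster_state` document described below; retries and error
+handling are elided:
+```js
+async function acquireLock(client, nodeUuid) {
+  // Fetch the cluster state together with its sequence number so we can
+  // perform a conditional (optimistic concurrency control) write.
+  const { body } = await client.get({ index: '.kibana', id: 'kibana_cluster_state' });
+  const state = body._source;
+  if (Object.values(state.nodes).some((node) => node.hasLock)) {
+    return false; // Another node holds the lock; wait for it to be released.
+  }
+  state.nodes[nodeUuid].hasLock = true;
+  try {
+    // Fails with a 409 if any other write (e.g. a heartbeat) updated the
+    // document since we fetched it, in which case we restart from step (3).
+    await client.index({
+      index: '.kibana',
+      id: 'kibana_cluster_state',
+      if_seq_no: body._seq_no,
+      if_primary_term: body._primary_term,
+      body: state,
+    });
+    return true;
+  } catch (e) {
+    if (e.statusCode === 409) return false;
+    throw e;
+  }
+}
+```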
+
+Each machine persists its Kibana `UUID` to a file, so a single machine may run
+multiple processes with the same Kibana `UUID`. We should therefore generate a
+new UUID just for the lifetime of each process.
+
+`KibanaClusterState` document format:
+```js
+{
+  nodes: {
+    "852bd94e-5121-47f3-a321-e09d9db8d16e": {
+      version: "7.6.0",
+      lastSeen: [ 1114793, 555149266 ], // process.hrtime() tuple: [seconds, nanoseconds]
+      hasLease: true,
+      hasLock: false,
+    },
+    "8d975c5b-cbf6-4418-9afb-7aa3ea34ac90": {
+      version: "7.6.0",
+      lastSeen: [ 1114862, 841295591 ],
+      hasLease: false,
+      hasLock: false,
+    },
+    "3ef25ff1-090a-4335-83a0-307a47712b4e": {
+      version: "7.6.0",
+      lastSeen: [ 1114877, 611368546 ],
+      hasLease: false,
+      hasLock: false,
+    },
+  },
+  oplog: [
+    { op: 'ACQUIRE_LOCK', node: '852bd94e...', timestamp: '2020-04-20T11:58:56.176Z' },
+  ],
+}
+```
+
+### 5.2.3 Checking for "weak lease" expiry
+The simplest way to check for lease expiry is to inspect the `lastSeen` value.
+If `lastSeen + expiry_timeout < now` the lease is considered expired. If there
+is clock drift or a daylight savings time adjustment, there's a risk that a
+node loses its lease before `expiry_timeout` has elapsed. Since losing a
+lease prematurely will not lead to data loss, it's not critical that the
+expiry time is observed under all conditions.
+
+A slightly safer approach is to use a monotonically increasing clock
+(`process.hrtime()`) and relative time to determine expiry. Using a
+monotonically increasing clock guarantees that the clock will always increase
+even if the system time changes due to daylight savings time, NTP clock syncs,
+or manually setting the time. To check for expiry, other nodes poll the
+cluster state document. Once they see that the `lastSeen` value has increased,
+they capture the current hr time `current_hr_time` and start waiting until
+`process.hrtime() - current_hr_time > expiry_timeout`; if at that point
+`lastSeen` hasn't been updated, the lease is considered to have expired. This
+means other nodes can take up to `2*expiry_timeout` to recognize an expired
+lease, but a lease will never expire prematurely.
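+
+A sketch of this polling check using Node's monotonic clock (names are
+illustrative; `lastSeen` values are compared after serializing them):
+```js
+const EXPIRY_TIMEOUT_NS = 60n * 1_000_000_000n; // 60s in nanoseconds
+
+function createExpiryChecker(initialLastSeen) {
+  let lastSeen = JSON.stringify(initialLastSeen);
+  let observedAt = process.hrtime.bigint(); // monotonic, unaffected by NTP/DST
+
+  // Call on every poll of the cluster state document; returns true once the
+  // lease is considered expired.
+  return function hasExpired(currentLastSeen) {
+    const current = JSON.stringify(currentLastSeen);
+    if (current !== lastSeen) {
+      // The node heartbeated: restart the expiry window.
+      lastSeen = current;
+      observedAt = process.hrtime.bigint();
+      return false;
+    }
+    return process.hrtime.bigint() - observedAt > EXPIRY_TIMEOUT_NS;
+  };
+}
+```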
+
+Any node that detects an expired lease can release that lease by setting the
+expired node's `hasLease = false`. It can then attempt to acquire the lease itself.
+
+## 5.3 Minimize data loss with mixed Kibana versions during 7.x
+When multiple versions of Kibana are running at the same time, writes from the
+outdated node can end up either in the outdated Kibana index, the newly
+migrated index, or both. New documents added (and some updates) into the old
+index while a migration is in progress will be lost. Writes that end up in the
+new index will be in an outdated format. This could cause queries on the data
+to only return a subset of the results which leads to incorrect results or
+silent data loss.
+
+Minimizing data loss from mixed 7.x versions introduces two additional steps
+to rollback to a previous version without a snapshot:
+1. (existing) Point the `.kibana` alias to the previous Kibana index `.kibana_n-1`
+2. (existing) Delete `.kibana_n`
+3. (new) Enable writes on `.kibana_n-1`
+4. (new) Delete the dummy "version lock" document from `.kibana_n-1`
+
+Since our documentation and server logs have implicitly encouraged users to
+roll back without using snapshots, many users might have to rely on these
+additional migration steps to perform a rollback. Since even the existing
+steps are error prone, introducing more steps will likely introduce more
+problems than it solves. The approach would work as follows:
+
+1. All future versions of Kibana 7.x will use the `.kibana_saved_objects`
+ alias to locate the current index. If `.kibana_saved_objects` doesn't
+ exist, newer versions will fallback to reading `.kibana`.
+2. All future versions of Kibana will locate the index that
+ `.kibana_saved_objects` points to and then read and write directly from
+ the _index_ instead of the alias.
+3. Before starting a migration:
+ 1. Write a new dummy "version lock" document to the `.kibana` index with a
+ `migrationVersion` set to the current version of Kibana. If an outdated
+ node is started up after a migration was started, it will detect that
+ newer documents are present in the index and refuse to start up.
+ 2. Set the outdated index to read-only. Since `.kibana` is never advanced,
+ it will be pointing to a read-only index, which prevents writes from
+ 6.8+ releases which are already online.
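+
+A sketch of step (3); the document id and field names are assumptions:
+```
+PUT /.kibana/_doc/version-lock:7.11.0
+{
+  "type": "version-lock",
+  "migrationVersion": { "version-lock": "7.11.0" }
+}
+
+PUT /.kibana/_settings
+{ "index.blocks.write": true }
+```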
+
+## 5.4 In-place migrations that re-use the same index (8.0)
+> We considered an algorithm that re-uses the same index for migrations and an approach to minimize data loss if our upgrade procedures aren't followed. This is no longer our preferred approach because of several downsides:
+> - It requires taking snapshots to prevent data loss so we can only release this in 8.x
+> - Minimizing data loss with unsupported upgrade configurations adds significant complexity and still doesn't guarantee that data isn't lost.
+
+### 5.4.1 Migration algorithm (8.0):
+1. Exit Kibana with a fatal error if a newer node has started a migration by
+ checking for:
+ 1. Documents with newer `migrationVersion` numbers.
+2. If the mappings are out of date, update the mappings to the combination of
+ the index's current mappings and the expected mappings.
+3. If there are outdated documents, migrate these in batches:
+ 1. Read a batch of outdated documents from the index.
+ 2. Transform documents by applying the migration transformation functions.
+ 3. Update the document batch in the same index using optimistic concurrency
+ control. If a batch fails due to an update version mismatch, continue
+ migrating the other batches. (See the `_bulk` sketch after this list.)
+ 4. If a batch fails due to other reasons, repeat the entire migration process.
+4. If any of the batches in step (3.3) failed, repeat the entire migration
+ process. This ensures that in-progress bulk update operations from an
+ outdated node won't lead to unmigrated documents still being present after
+ the migration.
+5. Once all documents are up to date, the migration is complete and Kibana can
+ start serving traffic.
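+
+Step (3.3) could use the `_bulk` API's optimistic concurrency parameters, as
+in this sketch (ids, sequence numbers and field values are illustrative):
+```
+POST /.kibana/_bulk
+{ "index": { "_id": "dashboard:123", "if_seq_no": 42, "if_primary_term": 1 } }
+{ "type": "dashboard", "migrationVersion": { "dashboard": "8.0.0" }, "dashboard": { "title": "..." } }
+```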
+
+Advantages:
+- Not duplicating all documents into a new index will speed up migrations and
+ reduce the downtime window. This will be especially important for the future
+ requirement to support > 10k or > 100k documents.
+- We can check the health of an existing index before starting the migration,
+ but we cannot detect what kind of failures might occur while creating a new
+ index. Whereas retrying migrations will eventually recover from the errors
+ in (3.3), re-using an index allows us to detect these problems up front
+ and avoid such errors altogether.
+- Single index to backup instead of “index pattern” that matches any
+ `.kibana_n`.
+- Simplifies Kibana system index Elasticsearch plugin since it needs to work
+ on one index per "tenant".
+- By leveraging optimistic concurrency control we can further minimize data
+ loss for unsupported upgrade configurations in the future.
+
+Drawbacks:
+- Cannot make breaking mapping changes (even though it was possible, we have not
+ introduced a breaking mapping change during 7.x).
+- Rollback is only possible by restoring a snapshot which requires educating
+ users to ensure that they don't rely on `.kibana_n` indices as backups.
+ (Apart from the need to educate users, snapshot restores provide many
+ benefits).
+- It narrows the second restriction under (4.2.1) even further: migrations
+ cannot rely on any state that could change as part of a migration because we
+ can no longer use the previous index as a snapshot of unmigrated state.
+- We can’t automatically perform a rollback from a half-way done migration.
+- It’s impossible to provide read-only functionality for outdated nodes which
+ means we can't achieve goal (2.7).
+
+### 5.4.2 Minimizing data loss with unsupported upgrade configurations (8.0)
+> This alternative can reduce some data loss when our upgrade procedure isn't
+> followed with the algorithm in (5.4.1).
+
+Even if (4.5.2) is the only supported upgrade procedure, we should try to
+prevent data loss when these instructions aren't followed.
+
+To prevent data loss we need to prevent any writes from older nodes. We use
+a version-specific alias for this purpose. Each time a migration is started,
+all other aliases are removed. However, aliases are stored inside
+Elasticsearch's ClusterState and this state could remain inconsistent between
+nodes for an unbounded amount of time. In addition, bulk operations that were
+accepted before the alias was removed will continue to run even after removing
+the alias.
+
+As a result, Kibana cannot guarantee that there would be no data loss but
+instead, aims to minimize it as much as possible by adding the bold sections
+to the migration algorithm from (5.4.1):
+
+1. **Disable `action.auto_create_index` for the Kibana system indices.**
+2. Exit Kibana with a fatal error if a newer node has started a migration by
+ checking for:
+ 1. **Version-specific aliases on the `.kibana` index with a newer version.**
+ 2. Documents with newer `migrationVersion` numbers.
+3. **Remove all other aliases and create a new version-specific alias for
+ reading and writing to the `.kibana` index, e.g. `.kibana_8.0.1`. During and
+ after the migration, all saved object reads and writes use this alias
+ instead of reading or writing directly to the index. By using the atomic
+ `POST /_aliases` API we minimize the chance that an outdated node creating
+ new outdated documents can cause data loss.**
+4. **Wait for the default bulk operation timeout of 30s. This ensures that any
+ bulk operations accepted before the removal of the alias have either
+ completed or returned a timeout error to its initiator.**
+5. If the mappings are out of date, update the mappings **through the alias**
+ to the combination of the index's current mappings and the expected
+ mappings. **If this operation fails due to an index missing exception (most
+ likely because another node removed our version-specific alias) repeat the
+ entire migration process.**
+6. If there are outdated documents, migrate these in batches:
+ 1. Read a batch of outdated documents from `.kibana_n`.
+ 2. Transform documents by applying the migration functions.
+ 3. Update the document batch in the same index using optimistic concurrency
+ control. If a batch fails due to an update version mismatch, continue
+ migrating the other batches.
+ 4. If a batch fails due to other reasons, repeat the entire migration process.
+7. If any of the batches in step (6.3) failed, repeat the entire migration
+ process. This ensures that in-progress bulk update operations from an
+ outdated node won't lead to unmigrated documents still being present after
+ the migration.
+8. Once all documents are up to date, the migration is complete and Kibana can
+ start serving traffic.
+
+Steps (2) and (3) of the migration algorithm minimize the chances of the
+following scenarios occurring but cannot guarantee it. It is therefore useful
+to enumerate some scenarios and their worst-case impact:
+1. An outdated node issued a bulk create to its version-specific alias.
+ Because a user doesn't wait for all traffic to drain, a newer node starts
+ its migration before the bulk create was complete. Since this bulk create
+ was accepted before the newer node deleted the previous version-specific
+ aliases, it is possible that the index now contains some outdated documents
+ that the new node is unaware of and doesn't migrate. Although these outdated
+ documents can lead to inconsistent query results and data loss, step (4)
+ ensures that an error will be returned to the node that created these
+ objects.
+2. An 8.1.0 node and an 8.2.0 node start migrating an 8.0.0 index in parallel.
+ Even though the 8.2.0 node will remove the 8.1.0 version-specific aliases,
+ the 8.1.0 node could have sent a bulk update operation that got accepted
+ before its alias was removed. When the 8.2.0 node tries to migrate these
+ 8.1.0 documents it gets a version conflict but cannot be sure if this was
+ because another node of the same version migrated this document (which can
+ safely be ignored) or interference from a different Kibana version. The
+ 8.1.0 node will hit the error in step (6.3) and restart the migration but
+ then ultimately fail at step (2). The 8.2.0 node will repeat the entire
+ migration process from step (7), thus ensuring that all documents are up to
+ date.
+3. A race condition with another Kibana node on the same version, but with
+ different enabled plugins caused this node's required mappings to be
+ overwritten. If this causes a mapper parsing exception in step (6.3) we can
+ restart the migration. Because updating the mappings is additive and saved
+ object types are unique to a plugin, restarting the migration will allow
+ the node to update the mappings to be compatible with node's plugins. Both
+ nodes will be able to successfully complete the migration of their plugins'
+ registered saved object types. However, if the migration doesn't trigger a
+ mapper parsing exception the incompatible mappings would go undetected
+ which can cause future problems like write failures or inconsistent query
+ results.
+
+## 5.5 Tag objects as “invalid” if their transformation fails
+> This alternative prevents a failed migration when there's a migration transform function bug or a document with invalid data. Although it seems preferable to not fail the entire migration because of a single saved object type's migration transform bug or a single invalid document this has several pitfalls:
+> 1. When an object fails to migrate, the data for that saved object type becomes inconsistent. This could lead to a critical feature being unavailable to a user, leaving them with no choice but to downgrade.
+> 2. Because Kibana starts accepting traffic after encountering invalid objects, a rollback will lead to data loss, leaving users with no clean way to recover.
+> As a result we prefer to let an upgrade fail and to make it easy for users to roll back until they can resolve the root cause.
+
+> Achieves goals: (2.2)
+> Mitigates Errors (3.1), (3.2)
+
+1. Tag objects as “invalid” if they cause an exception when being transformed,
+ but don’t fail the entire migration.
+2. Log an error message informing administrators that there are invalid
+ objects which require inspection. For each invalid object, provide an error
+ stack trace to aid in debugging.
+3. Administrators should be able to generate a migration report (similar to
+ the one dry run migrations create) which is an NDJSON export of all objects
+ tagged as “invalid”.
+ 1. Expose this as an HTTP API first
+ 2. (later) Notify administrators and allow them to export invalid objects
+ from the Kibana UI.
+4. When an invalid object is read, the Saved Objects repository will throw an
+ invalid object exception which should include a link to the documentation
+ to help administrators resolve migration bugs.
+5. Educate Kibana developers to no longer simply write back an unmigrated
+ document if an exception occurred. A migration function should either
+ successfully transform the object or throw.
+
+# 6. How we teach this
+1. Update documentation and server logs to start educating users to depend on
+ snapshots for Kibana rollbacks.
+2. Update developer documentation and educate developers with best practices
+ for writing migration functions.
+
+# 7. Unresolved questions
+1. When cloning an index we can only ever add new fields to the mappings. When
+ a saved object type or specific field is removed, the mappings will remain
+ until we re-index. Is it sufficient to only re-index every major? How do we
+ track the field count as it grows over every upgrade?
+2. More generally, how do we deal with the growing field count approaching the
+ default limit of 1000?
\ No newline at end of file
diff --git a/src/core/public/chrome/chrome_service.mock.ts b/src/core/public/chrome/chrome_service.mock.ts
index b624084258817..5e29218250fb9 100644
--- a/src/core/public/chrome/chrome_service.mock.ts
+++ b/src/core/public/chrome/chrome_service.mock.ts
@@ -9,7 +9,7 @@
import { BehaviorSubject } from 'rxjs';
import type { PublicMethodsOf } from '@kbn/utility-types';
import type { DeeplyMockedKeys } from '@kbn/utility-types/jest';
-import { ChromeBadge, ChromeBrand, ChromeBreadcrumb, ChromeService, InternalChromeStart } from './';
+import { ChromeBadge, ChromeBreadcrumb, ChromeService, InternalChromeStart } from './';
const createStartContractMock = () => {
const startContract: DeeplyMockedKeys<InternalChromeStart> = {
@@ -40,14 +40,8 @@ const createStartContractMock = () => {
getCenter$: jest.fn(),
getRight$: jest.fn(),
},
- setAppTitle: jest.fn(),
- setBrand: jest.fn(),
- getBrand$: jest.fn(),
setIsVisible: jest.fn(),
getIsVisible$: jest.fn(),
- addApplicationClass: jest.fn(),
- removeApplicationClass: jest.fn(),
- getApplicationClasses$: jest.fn(),
getBadge$: jest.fn(),
setBadge: jest.fn(),
getBreadcrumbs$: jest.fn(),
@@ -64,9 +58,7 @@ const createStartContractMock = () => {
getBodyClasses$: jest.fn(),
};
startContract.navLinks.getAll.mockReturnValue([]);
- startContract.getBrand$.mockReturnValue(new BehaviorSubject({} as ChromeBrand));
startContract.getIsVisible$.mockReturnValue(new BehaviorSubject(false));
- startContract.getApplicationClasses$.mockReturnValue(new BehaviorSubject(['class-name']));
startContract.getBadge$.mockReturnValue(new BehaviorSubject({} as ChromeBadge));
startContract.getBreadcrumbs$.mockReturnValue(new BehaviorSubject([{} as ChromeBreadcrumb]));
startContract.getBreadcrumbsAppendExtension$.mockReturnValue(new BehaviorSubject(undefined));
diff --git a/src/core/public/chrome/chrome_service.test.ts b/src/core/public/chrome/chrome_service.test.ts
index 92f5a854f6b00..8df8d76a13c46 100644
--- a/src/core/public/chrome/chrome_service.test.ts
+++ b/src/core/public/chrome/chrome_service.test.ts
@@ -23,8 +23,10 @@ import { getAppInfo } from '../application/utils';
class FakeApp implements App {
public title = `${this.id} App`;
public mount = () => () => {};
+
constructor(public id: string, public chromeless?: boolean) {}
}
+
const store = new Map();
const originalLocalStorage = window.localStorage;
@@ -170,36 +172,6 @@ describe('start', () => {
});
});
- describe('brand', () => {
- it('updates/emits the brand as it changes', async () => {
- const { chrome, service } = await start();
- const promise = chrome.getBrand$().pipe(toArray()).toPromise();
-
- chrome.setBrand({
- logo: 'big logo',
- smallLogo: 'not so big logo',
- });
- chrome.setBrand({
- logo: 'big logo without small logo',
- });
- service.stop();
-
- await expect(promise).resolves.toMatchInlineSnapshot(`
- Array [
- Object {},
- Object {
- "logo": "big logo",
- "smallLogo": "not so big logo",
- },
- Object {
- "logo": "big logo without small logo",
- "smallLogo": undefined,
- },
- ]
- `);
- });
- });
-
describe('visibility', () => {
it('emits false when no application is mounted', async () => {
const { chrome, service } = await start();
@@ -289,54 +261,6 @@ describe('start', () => {
});
});
- describe('application classes', () => {
- it('updates/emits the application classes', async () => {
- const { chrome, service } = await start();
- const promise = chrome.getApplicationClasses$().pipe(toArray()).toPromise();
-
- chrome.addApplicationClass('foo');
- chrome.addApplicationClass('foo');
- chrome.addApplicationClass('bar');
- chrome.addApplicationClass('bar');
- chrome.addApplicationClass('baz');
- chrome.removeApplicationClass('bar');
- chrome.removeApplicationClass('foo');
- service.stop();
-
- await expect(promise).resolves.toMatchInlineSnapshot(`
- Array [
- Array [],
- Array [
- "foo",
- ],
- Array [
- "foo",
- ],
- Array [
- "foo",
- "bar",
- ],
- Array [
- "foo",
- "bar",
- ],
- Array [
- "foo",
- "bar",
- "baz",
- ],
- Array [
- "foo",
- "baz",
- ],
- Array [
- "baz",
- ],
- ]
- `);
- });
- });
-
describe('badge', () => {
it('updates/emits the current badge', async () => {
const { chrome, service } = await start();
@@ -407,7 +331,9 @@ describe('start', () => {
const { chrome, service } = await start();
const promise = chrome.getBreadcrumbsAppendExtension$().pipe(toArray()).toPromise();
- chrome.setBreadcrumbsAppendExtension({ content: (element) => () => {} });
+ chrome.setBreadcrumbsAppendExtension({
+ content: (element) => () => {},
+ });
service.stop();
await expect(promise).resolves.toMatchInlineSnapshot(`
@@ -521,14 +447,12 @@ describe('start', () => {
describe('stop', () => {
it('completes applicationClass$, getIsNavDrawerLocked, breadcrumbs$, isVisible$, and brand$ observables', async () => {
const { chrome, service } = await start();
- const promise = Rx.combineLatest(
- chrome.getBrand$(),
- chrome.getApplicationClasses$(),
+ const promise = Rx.combineLatest([
chrome.getIsNavDrawerLocked$(),
chrome.getBreadcrumbs$(),
chrome.getIsVisible$(),
- chrome.getHelpExtension$()
- ).toPromise();
+ chrome.getHelpExtension$(),
+ ]).toPromise();
service.stop();
await promise;
@@ -539,14 +463,12 @@ describe('stop', () => {
service.stop();
await expect(
- Rx.combineLatest(
- chrome.getBrand$(),
- chrome.getApplicationClasses$(),
+ Rx.combineLatest([
chrome.getIsNavDrawerLocked$(),
chrome.getBreadcrumbs$(),
chrome.getIsVisible$(),
- chrome.getHelpExtension$()
- ).toPromise()
+ chrome.getHelpExtension$(),
+ ]).toPromise()
).resolves.toBe(undefined);
});
});
diff --git a/src/core/public/chrome/chrome_service.tsx b/src/core/public/chrome/chrome_service.tsx
index f1381c52ce779..5740e1739280a 100644
--- a/src/core/public/chrome/chrome_service.tsx
+++ b/src/core/public/chrome/chrome_service.tsx
@@ -26,7 +26,6 @@ import { ChromeRecentlyAccessed, RecentlyAccessedService } from './recently_acce
import { Header } from './ui';
import {
ChromeBadge,
- ChromeBrand,
ChromeBreadcrumb,
ChromeBreadcrumbsAppendExtension,
ChromeHelpExtension,
@@ -105,9 +104,6 @@ export class ChromeService {
}: StartDeps): Promise<InternalChromeStart> {
this.initVisibility(application);
- const appTitle$ = new BehaviorSubject<string>('Kibana');
- const brand$ = new BehaviorSubject<ChromeBrand>({});
- const applicationClasses$ = new BehaviorSubject<Set<string>>(new Set());
const helpExtension$ = new BehaviorSubject<ChromeHelpExtension | undefined>(undefined);
const breadcrumbs$ = new BehaviorSubject<ChromeBreadcrumb[]>([]);
const breadcrumbsAppendExtension$ = new BehaviorSubject<
@@ -210,7 +206,6 @@ export class ChromeService {
),
- setAppTitle: (appTitle: string) => appTitle$.next(appTitle),
-
- getBrand$: () => brand$.pipe(takeUntil(this.stop$)),
-
- setBrand: (brand: ChromeBrand) => {
- brand$.next(
- Object.freeze({
- logo: brand.logo,
- smallLogo: brand.smallLogo,
- })
- );
- },
-
getIsVisible$: () => this.isVisible$,
setIsVisible: (isVisible: boolean) => this.isForceHidden$.next(!isVisible),
- getApplicationClasses$: () =>
- applicationClasses$.pipe(
- map((set) => [...set]),
- takeUntil(this.stop$)
- ),
-
- addApplicationClass: (className: string) => {
- const update = new Set([...applicationClasses$.getValue()]);
- update.add(className);
- applicationClasses$.next(update);
- },
-
- removeApplicationClass: (className: string) => {
- const update = new Set([...applicationClasses$.getValue()]);
- update.delete(className);
- applicationClasses$.next(update);
- },
-
getBadge$: () => badge$.pipe(takeUntil(this.stop$)),
setBadge: (badge: ChromeBadge) => {
diff --git a/src/core/public/chrome/index.ts b/src/core/public/chrome/index.ts
index dd7affcdbf7cd..b1a70c1dc2b04 100644
--- a/src/core/public/chrome/index.ts
+++ b/src/core/public/chrome/index.ts
@@ -29,7 +29,6 @@ export type {
ChromeHelpExtension,
ChromeBreadcrumbsAppendExtension,
ChromeBreadcrumb,
- ChromeBrand,
ChromeBadge,
ChromeUserBanner,
} from './types';
diff --git a/src/core/public/chrome/types.ts b/src/core/public/chrome/types.ts
index 732236f1ba4a1..813f385fc94d2 100644
--- a/src/core/public/chrome/types.ts
+++ b/src/core/public/chrome/types.ts
@@ -22,12 +22,6 @@ export interface ChromeBadge {
iconType?: IconType;
}
-/** @public */
-export interface ChromeBrand {
- logo?: string;
- smallLogo?: string;
-}
-
/** @public */
export type ChromeBreadcrumb = EuiBreadcrumb;
@@ -93,40 +87,6 @@ export interface ChromeStart {
/** {@inheritdoc ChromeDocTitle} */
docTitle: ChromeDocTitle;
- /**
- * Sets the current app's title
- *
- * @internalRemarks
- * This should be handled by the application service once it is in charge
- * of mounting applications.
- */
- setAppTitle(appTitle: string): void;
-
- /**
- * Get an observable of the current brand information.
- */
- getBrand$(): Observable<ChromeBrand>;
-
- /**
- * Set the brand configuration.
- *
- * @remarks
- * Normally the `logo` property will be rendered as the
- * CSS background for the home link in the chrome navigation, but when the page is
- * rendered in a small window the `smallLogo` will be used and rendered at about
- * 45px wide.
- *
- * @example
- * ```js
- * chrome.setBrand({
- * logo: 'url(/plugins/app/logo.png) center no-repeat'
- * smallLogo: 'url(/plugins/app/logo-small.png) center no-repeat'
- * })
- * ```
- *
- */
- setBrand(brand: ChromeBrand): void;
-
/**
* Get an observable of the current visibility state of the chrome.
*/
@@ -139,21 +99,6 @@ export interface ChromeStart {
*/
setIsVisible(isVisible: boolean): void;
- /**
- * Get the current set of classNames that will be set on the application container.
- */
- getApplicationClasses$(): Observable<string[]>;
-
- /**
- * Add a className that should be set on the application container.
- */
- addApplicationClass(className: string): void;
-
- /**
- * Remove a className added with `addApplicationClass()`. If className is unknown it is ignored.
- */
- removeApplicationClass(className: string): void;
-
/**
* Get an observable of the current badge
*/
@@ -232,6 +177,7 @@ export interface InternalChromeStart extends ChromeStart {
* @internal
*/
getHeaderComponent(): JSX.Element;
+
/**
* Used only by the rendering service to retrieve the set of classNames
* that will be set on the body element.
diff --git a/src/core/public/chrome/ui/header/__snapshots__/header.test.tsx.snap b/src/core/public/chrome/ui/header/__snapshots__/header.test.tsx.snap
index d2bc11f4db877..4450533090c7f 100644
--- a/src/core/public/chrome/ui/header/__snapshots__/header.test.tsx.snap
+++ b/src/core/public/chrome/ui/header/__snapshots__/header.test.tsx.snap
@@ -9,45 +9,7 @@ exports[`Header renders 1`] = `
"closed": false,
"hasError": false,
"isStopped": false,
- "observers": Array [
- Subscriber {
- "_parentOrParents": null,
- "_subscriptions": Array [
- SubjectSubscription {
- "_parentOrParents": [Circular],
- "_subscriptions": null,
- "closed": false,
- "subject": [Circular],
- "subscriber": [Circular],
- },
- ],
- "closed": false,
- "destination": SafeSubscriber {
- "_complete": undefined,
- "_context": [Circular],
- "_error": undefined,
- "_next": [Function],
- "_parentOrParents": null,
- "_parentSubscriber": [Circular],
- "_subscriptions": null,
- "closed": false,
- "destination": Object {
- "closed": true,
- "complete": [Function],
- "error": [Function],
- "next": [Function],
- },
- "isStopped": false,
- "syncErrorThrowable": false,
- "syncErrorThrown": false,
- "syncErrorValue": null,
- },
- "isStopped": false,
- "syncErrorThrowable": true,
- "syncErrorThrown": false,
- "syncErrorValue": null,
- },
- ],
+ "observers": Array [],
"thrownError": null,
}
}
@@ -4713,55 +4675,6 @@ exports[`Header renders 1`] = `
+
+
+ Kibana
+
+
+
+`;
diff --git a/src/core/public/chrome/ui/header/header.tsx b/src/core/public/chrome/ui/header/header.tsx
index a401195b38942..578c87411e543 100644
--- a/src/core/public/chrome/ui/header/header.tsx
+++ b/src/core/public/chrome/ui/header/header.tsx
@@ -52,7 +52,6 @@ export interface HeaderProps {
kibanaVersion: string;
application: InternalApplicationStart;
headerBanner$: Observable<ChromeUserBanner | undefined>;
- appTitle$: Observable<string>;
badge$: Observable<ChromeBadge | undefined>;
breadcrumbs$: Observable<ChromeBreadcrumb[]>;
breadcrumbsAppendExtension$: Observable<ChromeBreadcrumbsAppendExtension | undefined>;
@@ -102,9 +101,7 @@ export function Header({
const toggleCollapsibleNavRef = createRef void }>();
const className = classnames('hide-for-sharing', 'headerGlobalNav');
- const Breadcrumbs = (
-   <HeaderBreadcrumbs appTitle$={appTitle$} breadcrumbs$={breadcrumbs$} />
- );
+ const Breadcrumbs = <HeaderBreadcrumbs breadcrumbs$={breadcrumbs$} />;
return (
<>
diff --git a/src/core/public/chrome/ui/header/header_breadcrumbs.test.tsx b/src/core/public/chrome/ui/header/header_breadcrumbs.test.tsx
index 26b397229d7e9..7d40bd77e2548 100644
--- a/src/core/public/chrome/ui/header/header_breadcrumbs.test.tsx
+++ b/src/core/public/chrome/ui/header/header_breadcrumbs.test.tsx
@@ -15,9 +15,7 @@ import { HeaderBreadcrumbs } from './header_breadcrumbs';
describe('HeaderBreadcrumbs', () => {
it('renders updates to the breadcrumbs$ observable', () => {
const breadcrumbs$ = new BehaviorSubject([{ text: 'First' }]);
- const wrapper = mount(
-
- );
+ const wrapper = mount();
expect(wrapper.find('.euiBreadcrumb')).toMatchSnapshot();
act(() => breadcrumbs$.next([{ text: 'First' }, { text: 'Second' }]));
diff --git a/src/core/public/chrome/ui/header/header_breadcrumbs.tsx b/src/core/public/chrome/ui/header/header_breadcrumbs.tsx
index 0e2bae82a3ad3..a90ceed32dcce 100644
--- a/src/core/public/chrome/ui/header/header_breadcrumbs.tsx
+++ b/src/core/public/chrome/ui/header/header_breadcrumbs.tsx
@@ -14,17 +14,15 @@ import { Observable } from 'rxjs';
import { ChromeBreadcrumb } from '../../types';
interface Props {
- appTitle$: Observable<string>;
breadcrumbs$: Observable<ChromeBreadcrumb[]>;
}
-export function HeaderBreadcrumbs({ appTitle$, breadcrumbs$ }: Props) {
- const appTitle = useObservable(appTitle$, 'Kibana');
+export function HeaderBreadcrumbs({ breadcrumbs$ }: Props) {
const breadcrumbs = useObservable(breadcrumbs$, []);
let crumbs = breadcrumbs;
- if (breadcrumbs.length === 0 && appTitle) {
- crumbs = [{ text: appTitle }];
+ if (breadcrumbs.length === 0) {
+ crumbs = [{ text: 'Kibana' }];
}
crumbs = crumbs.map((breadcrumb, i) => ({
diff --git a/src/core/public/index.ts b/src/core/public/index.ts
index e6e6433291873..d343a0b081fa1 100644
--- a/src/core/public/index.ts
+++ b/src/core/public/index.ts
@@ -28,7 +28,6 @@ import './index.scss';
import {
ChromeBadge,
- ChromeBrand,
ChromeBreadcrumb,
ChromeHelpExtension,
ChromeHelpExtensionMenuLink,
@@ -287,7 +286,6 @@ export interface CoreStart {
export type {
Capabilities,
ChromeBadge,
- ChromeBrand,
ChromeBreadcrumb,
ChromeHelpExtension,
ChromeHelpExtensionMenuLink,
diff --git a/src/core/public/public.api.md b/src/core/public/public.api.md
index d3f9ce71379b7..2217b71d2f1a3 100644
--- a/src/core/public/public.api.md
+++ b/src/core/public/public.api.md
@@ -231,14 +231,6 @@ export interface ChromeBadge {
tooltip: string;
}
-// @public (undocumented)
-export interface ChromeBrand {
- // (undocumented)
- logo?: string;
- // (undocumented)
- smallLogo?: string;
-}
-
// @public (undocumented)
export type ChromeBreadcrumb = EuiBreadcrumb;
@@ -355,11 +347,8 @@ export interface ChromeRecentlyAccessedHistoryItem {
// @public
export interface ChromeStart {
- addApplicationClass(className: string): void;
docTitle: ChromeDocTitle;
- getApplicationClasses$(): Observable<string[]>;
getBadge$(): Observable<ChromeBadge | undefined>;
- getBrand$(): Observable<ChromeBrand>;
getBreadcrumbs$(): Observable<ChromeBreadcrumb[]>;
// Warning: (ae-forgotten-export) The symbol "ChromeBreadcrumbsAppendExtension" needs to be exported by the entry point index.d.ts
getBreadcrumbsAppendExtension$(): Observable<ChromeBreadcrumbsAppendExtension | undefined>;
@@ -370,10 +359,7 @@ export interface ChromeStart {
navControls: ChromeNavControls;
navLinks: ChromeNavLinks;
recentlyAccessed: ChromeRecentlyAccessed;
- removeApplicationClass(className: string): void;
- setAppTitle(appTitle: string): void;
setBadge(badge?: ChromeBadge): void;
- setBrand(brand: ChromeBrand): void;
setBreadcrumbs(newBreadcrumbs: ChromeBreadcrumb[]): void;
setBreadcrumbsAppendExtension(breadcrumbsAppendExtension?: ChromeBreadcrumbsAppendExtension): void;
setCustomNavLink(newCustomNavLink?: Partial<ChromeNavLink>): void;
diff --git a/src/core/public/rendering/app_containers.test.tsx b/src/core/public/rendering/app_containers.test.tsx
index 193e393f268f0..10f5f3f1c138f 100644
--- a/src/core/public/rendering/app_containers.test.tsx
+++ b/src/core/public/rendering/app_containers.test.tsx
@@ -6,7 +6,7 @@
* Side Public License, v 1.
*/
-import { BehaviorSubject, of } from 'rxjs';
+import { BehaviorSubject } from 'rxjs';
import { act } from 'react-dom/test-utils';
import { mount } from 'enzyme';
import React from 'react';
@@ -17,11 +17,7 @@ describe('AppWrapper', () => {
it('toggles the `hidden-chrome` class depending on the chrome visibility state', () => {
const chromeVisible$ = new BehaviorSubject(true);
- const component = mount(
-
- app-content
-
- );
+ const component = mount(app-content);
expect(component.getDOMNode()).toMatchInlineSnapshot(`
{/* The App Wrapper outside of the fixed headers that accepts custom class names from apps */}
-
+
{/* Affixes a div to restrict the position of charts tooltip to the visible viewport minus the header */}
diff --git a/src/core/server/core_app/core_app.test.ts b/src/core/server/core_app/core_app.test.ts
index f6a9b653ec034..e5c3a592a72c7 100644
--- a/src/core/server/core_app/core_app.test.ts
+++ b/src/core/server/core_app/core_app.test.ts
@@ -137,7 +137,7 @@ describe('CoreApp', () => {
mockResponseFactory
);
- expect(mockResponseFactory.renderAnonymousCoreApp).toHaveBeenCalled();
+ expect(mockResponseFactory.renderCoreApp).toHaveBeenCalled();
});
});
diff --git a/src/core/server/core_app/core_app.ts b/src/core/server/core_app/core_app.ts
index 35a7c57b67610..23ad78ca46d45 100644
--- a/src/core/server/core_app/core_app.ts
+++ b/src/core/server/core_app/core_app.ts
@@ -64,7 +64,7 @@ export class CoreApp {
httpResources: corePreboot.httpResources.createRegistrar(router),
router,
uiPlugins,
- onResourceNotFound: (res) => res.renderAnonymousCoreApp(),
+ onResourceNotFound: (res) => res.renderCoreApp(),
});
});
}
diff --git a/src/core/server/elasticsearch/client/client_config.test.ts b/src/core/server/elasticsearch/client/client_config.test.ts
index 7e16339b40235..7956bcc64ea2f 100644
--- a/src/core/server/elasticsearch/client/client_config.test.ts
+++ b/src/core/server/elasticsearch/client/client_config.test.ts
@@ -163,6 +163,12 @@ describe('parseClientOptions', () => {
]
`);
});
+
+ it('`caFingerprint` option', () => {
+ const options = parseClientOptions(createConfig({ caFingerprint: 'ab:cd:ef' }), false);
+
+ expect(options.caFingerprint).toBe('ab:cd:ef');
+ });
});
describe('authorization', () => {
diff --git a/src/core/server/elasticsearch/client/client_config.ts b/src/core/server/elasticsearch/client/client_config.ts
index bbbb1ac247b3b..27d6f877a5572 100644
--- a/src/core/server/elasticsearch/client/client_config.ts
+++ b/src/core/server/elasticsearch/client/client_config.ts
@@ -35,6 +35,7 @@ export type ElasticsearchClientConfig = Pick<
requestTimeout?: ElasticsearchConfig['requestTimeout'] | ClientOptions['requestTimeout'];
ssl?: Partial;
keepAlive?: boolean;
+ caFingerprint?: ClientOptions['caFingerprint'];
};
/**
@@ -96,6 +97,10 @@ export function parseClientOptions(
);
}
+ if (config.caFingerprint != null) {
+ clientOptions.caFingerprint = config.caFingerprint;
+ }
+
return clientOptions;
}
diff --git a/src/core/server/http/http_service.mock.ts b/src/core/server/http/http_service.mock.ts
index ef5e151083780..4cb1bc9867d2c 100644
--- a/src/core/server/http/http_service.mock.ts
+++ b/src/core/server/http/http_service.mock.ts
@@ -88,6 +88,7 @@ const createInternalPrebootContractMock = () => {
csp: CspConfig.DEFAULT,
externalUrl: ExternalUrlConfig.DEFAULT,
auth: createAuthMock(),
+ getServerInfo: jest.fn(),
};
return mock;
};
@@ -98,6 +99,7 @@ const createPrebootContractMock = () => {
const mock: HttpServicePrebootMock = {
registerRoutes: internalMock.registerRoutes,
basePath: createBasePathMock(),
+ getServerInfo: jest.fn(),
};
return mock;
diff --git a/src/core/server/http/http_service.test.ts b/src/core/server/http/http_service.test.ts
index 8d29e3221a2ca..4955d19668580 100644
--- a/src/core/server/http/http_service.test.ts
+++ b/src/core/server/http/http_service.test.ts
@@ -379,6 +379,7 @@ test('returns `preboot` http server contract on preboot', async () => {
auth: Symbol('auth'),
basePath: Symbol('basePath'),
csp: Symbol('csp'),
+ getServerInfo: jest.fn(),
};
mockHttpServer.mockImplementation(() => ({
@@ -397,6 +398,7 @@ test('returns `preboot` http server contract on preboot', async () => {
registerRouteHandlerContext: expect.any(Function),
registerRoutes: expect.any(Function),
registerStaticDir: expect.any(Function),
+ getServerInfo: expect.any(Function),
});
});
diff --git a/src/core/server/http/http_service.ts b/src/core/server/http/http_service.ts
index 4b9e45e271be2..538a4c065e997 100644
--- a/src/core/server/http/http_service.ts
+++ b/src/core/server/http/http_service.ts
@@ -128,6 +128,7 @@ export class HttpService
prebootSetup.registerRouterAfterListening(router);
},
+ getServerInfo: prebootSetup.getServerInfo,
};
return this.internalPreboot;
diff --git a/src/core/server/http/integration_tests/router.test.ts b/src/core/server/http/integration_tests/router.test.ts
index 5bea371d479ae..a3e872ee3ea87 100644
--- a/src/core/server/http/integration_tests/router.test.ts
+++ b/src/core/server/http/integration_tests/router.test.ts
@@ -17,7 +17,7 @@ import { loggingSystemMock } from '../../logging/logging_system.mock';
import { createHttpServer } from '../test_utils';
import { HttpService } from '../http_service';
import { Router } from '../router';
-import { loggerMock } from '@kbn/logging/target/mocks';
+import { loggerMock } from '@kbn/logging/mocks';
let server: HttpService;
let logger: ReturnType;
diff --git a/src/core/server/http/types.ts b/src/core/server/http/types.ts
index 7353f48b47194..89d0d72017082 100644
--- a/src/core/server/http/types.ts
+++ b/src/core/server/http/types.ts
@@ -142,6 +142,11 @@ export interface HttpServicePreboot {
* See {@link IBasePath}.
*/
basePath: IBasePath;
+
+ /**
+ * Provides common {@link HttpServerInfo | information} about the running preboot http server.
+ */
+ getServerInfo: () => HttpServerInfo;
}
/** @internal */
@@ -155,6 +160,7 @@ export interface InternalHttpServicePreboot
| 'registerStaticDir'
| 'registerRouteHandlerContext'
| 'server'
+ | 'getServerInfo'
> {
registerRoutes(path: string, callback: (router: IRouter) => void): void;
}
diff --git a/src/core/server/logging/logger.mock.ts b/src/core/server/logging/logger.mock.ts
index efab15b7bf5f4..cfabaeb72adf7 100644
--- a/src/core/server/logging/logger.mock.ts
+++ b/src/core/server/logging/logger.mock.ts
@@ -6,5 +6,5 @@
* Side Public License, v 1.
*/
-export { loggerMock } from '@kbn/logging/target/mocks';
-export type { MockedLogger } from '@kbn/logging/target/mocks';
+export { loggerMock } from '@kbn/logging/mocks';
+export type { MockedLogger } from '@kbn/logging/mocks';
diff --git a/src/core/server/metrics/collectors/cgroup.test.ts b/src/core/server/metrics/collectors/cgroup.test.ts
index 298a143717d84..269437f026f2f 100644
--- a/src/core/server/metrics/collectors/cgroup.test.ts
+++ b/src/core/server/metrics/collectors/cgroup.test.ts
@@ -7,7 +7,7 @@
*/
import mockFs from 'mock-fs';
-import { loggerMock } from '@kbn/logging/target/mocks';
+import { loggerMock } from '@kbn/logging/mocks';
import { OsCgroupMetricsCollector } from './cgroup';
describe('OsCgroupMetricsCollector', () => {
diff --git a/src/core/server/metrics/collectors/os.test.ts b/src/core/server/metrics/collectors/os.test.ts
index 37373ea14c339..5592038f1416a 100644
--- a/src/core/server/metrics/collectors/os.test.ts
+++ b/src/core/server/metrics/collectors/os.test.ts
@@ -8,7 +8,7 @@
jest.mock('getos', () => (cb: Function) => cb(null, { dist: 'distrib', release: 'release' }));
-import { loggerMock } from '@kbn/logging/target/mocks';
+import { loggerMock } from '@kbn/logging/mocks';
import os from 'os';
import { cgroupCollectorMock } from './os.test.mocks';
import { OsMetricsCollector } from './os';
diff --git a/src/core/server/metrics/ops_metrics_collector.test.ts b/src/core/server/metrics/ops_metrics_collector.test.ts
index e966c7e0a8095..3faa771db1dae 100644
--- a/src/core/server/metrics/ops_metrics_collector.test.ts
+++ b/src/core/server/metrics/ops_metrics_collector.test.ts
@@ -6,7 +6,7 @@
* Side Public License, v 1.
*/
-import { loggerMock } from '@kbn/logging/target/mocks';
+import { loggerMock } from '@kbn/logging/mocks';
import {
mockOsCollector,
mockProcessCollector,
diff --git a/src/core/server/plugins/plugin_context.ts b/src/core/server/plugins/plugin_context.ts
index b972c6078ca2b..cbefdae525180 100644
--- a/src/core/server/plugins/plugin_context.ts
+++ b/src/core/server/plugins/plugin_context.ts
@@ -115,6 +115,7 @@ export function createPluginPrebootSetupContext(
http: {
registerRoutes: deps.http.registerRoutes,
basePath: deps.http.basePath,
+ getServerInfo: deps.http.getServerInfo,
},
preboot: {
isSetupOnHold: deps.preboot.isSetupOnHold,
diff --git a/src/core/server/saved_objects/migrationsv2/README.md b/src/core/server/saved_objects/migrationsv2/README.md
index fcfff14ec98be..5bdc548987842 100644
--- a/src/core/server/saved_objects/migrationsv2/README.md
+++ b/src/core/server/saved_objects/migrationsv2/README.md
@@ -1,17 +1,419 @@
-## TODO
- - [ ] Should we adopt the naming convention of event log `.kibana-event-log-8.0.0-000001`?
- - [ ] Can we detect and throw if there's an auto-created `.kibana` index
- with inferred mappings? If we detect this we cannot assume that `.kibana`
- contains all the latest documents. Our algorithm might also fail because we
- clone the `.kibana` index with it's faulty mappings which can prevent us
- from updating the mappings to the correct ones. We can ask users to verify
- their indices to identify where the most up to date documents are located
- (e.g. in `.kibana`, `.kibana_N` or perhaps a combination of both). We can
- prepare a `.kibana_7.11.0_001` index and ask users to manually reindex
- documents into this index.
-
-## Manual QA Test Plan
-### 1. Legacy pre-migration
+- [Introduction](#introduction)
+- [Algorithm steps](#algorithm-steps)
+ - [INIT](#init)
+ - [Next action](#next-action)
+ - [New control state](#new-control-state)
+ - [CREATE_NEW_TARGET](#create_new_target)
+ - [Next action](#next-action-1)
+ - [New control state](#new-control-state-1)
+ - [LEGACY_SET_WRITE_BLOCK](#legacy_set_write_block)
+ - [Next action](#next-action-2)
+ - [New control state](#new-control-state-2)
+ - [LEGACY_CREATE_REINDEX_TARGET](#legacy_create_reindex_target)
+ - [Next action](#next-action-3)
+ - [New control state](#new-control-state-3)
+ - [LEGACY_REINDEX](#legacy_reindex)
+ - [Next action](#next-action-4)
+ - [New control state](#new-control-state-4)
+ - [LEGACY_REINDEX_WAIT_FOR_TASK](#legacy_reindex_wait_for_task)
+ - [Next action](#next-action-5)
+ - [New control state](#new-control-state-5)
+ - [LEGACY_DELETE](#legacy_delete)
+ - [Next action](#next-action-6)
+ - [New control state](#new-control-state-6)
+ - [WAIT_FOR_YELLOW_SOURCE](#wait_for_yellow_source)
+ - [Next action](#next-action-7)
+ - [New control state](#new-control-state-7)
+ - [SET_SOURCE_WRITE_BLOCK](#set_source_write_block)
+ - [Next action](#next-action-8)
+ - [New control state](#new-control-state-8)
+ - [CREATE_REINDEX_TEMP](#create_reindex_temp)
+ - [Next action](#next-action-9)
+ - [New control state](#new-control-state-9)
+ - [REINDEX_SOURCE_TO_TEMP_OPEN_PIT](#reindex_source_to_temp_open_pit)
+ - [Next action](#next-action-10)
+ - [New control state](#new-control-state-10)
+ - [REINDEX_SOURCE_TO_TEMP_READ](#reindex_source_to_temp_read)
+ - [Next action](#next-action-11)
+ - [New control state](#new-control-state-11)
+ - [REINDEX_SOURCE_TO_TEMP_INDEX](#reindex_source_to_temp_index)
+ - [Next action](#next-action-12)
+ - [New control state](#new-control-state-12)
+ - [REINDEX_SOURCE_TO_TEMP_INDEX_BULK](#reindex_source_to_temp_index_bulk)
+ - [Next action](#next-action-13)
+ - [New control state](#new-control-state-13)
+ - [REINDEX_SOURCE_TO_TEMP_CLOSE_PIT](#reindex_source_to_temp_close_pit)
+ - [Next action](#next-action-14)
+ - [New control state](#new-control-state-14)
+ - [SET_TEMP_WRITE_BLOCK](#set_temp_write_block)
+ - [Next action](#next-action-15)
+ - [New control state](#new-control-state-15)
+ - [CLONE_TEMP_TO_TARGET](#clone_temp_to_target)
+ - [Next action](#next-action-16)
+ - [New control state](#new-control-state-16)
+ - [OUTDATED_DOCUMENTS_SEARCH](#outdated_documents_search)
+ - [Next action](#next-action-17)
+ - [New control state](#new-control-state-17)
+ - [OUTDATED_DOCUMENTS_TRANSFORM](#outdated_documents_transform)
+ - [Next action](#next-action-18)
+ - [New control state](#new-control-state-18)
+ - [UPDATE_TARGET_MAPPINGS](#update_target_mappings)
+ - [Next action](#next-action-19)
+ - [New control state](#new-control-state-19)
+ - [UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK](#update_target_mappings_wait_for_task)
+ - [Next action](#next-action-20)
+ - [New control state](#new-control-state-20)
+ - [MARK_VERSION_INDEX_READY_CONFLICT](#mark_version_index_ready_conflict)
+ - [Next action](#next-action-21)
+ - [New control state](#new-control-state-21)
+- [Manual QA Test Plan](#manual-qa-test-plan)
+ - [1. Legacy pre-migration](#1-legacy-pre-migration)
+ - [2. Plugins enabled/disabled](#2-plugins-enableddisabled)
+ - [Test scenario 1 (enable a plugin after migration):](#test-scenario-1-enable-a-plugin-after-migration)
+ - [Test scenario 2 (disable a plugin after migration):](#test-scenario-2-disable-a-plugin-after-migration)
+ - [Test scenario 3 (multiple instances, enable a plugin after migration):](#test-scenario-3-multiple-instances-enable-a-plugin-after-migration)
+ - [Test scenario 4 (multiple instances, mixed plugin enabled configs):](#test-scenario-4-multiple-instances-mixed-plugin-enabled-configs)
+
+# Introduction
+In the past, the risk of downtime caused by Kibana's saved object upgrade
+migrations has discouraged users from adopting the latest features. v2
+migrations aim to solve this problem by minimizing the operational impact on
+our users.
+
+To achieve this, it uses a new migration algorithm where every step of the
+algorithm is idempotent. No matter at which step a Kibana instance gets
+interrupted, it can always restart the migration from the beginning and repeat
+all the steps without requiring any user intervention. This doesn't mean
+migrations will never fail, but when they fail for intermittent reasons, like
+an Elasticsearch cluster running out of heap, Kibana will automatically
+complete the migration once the cluster has enough heap.
+
+For more background information on the problem see the [saved object
+migrations
+RFC](https://github.com/elastic/kibana/blob/master/rfcs/text/0013_saved_object_migrations.md).
+
+# Algorithm steps
+The design goals for the algorithm were to keep downtime below 10 minutes for
+100k saved objects while guaranteeing no data loss and keeping steps as simple
+and explicit as possible.
+
+The algorithm is implemented as a state-action machine based on https://www.microsoft.com/en-us/research/uploads/prod/2016/12/Computation-and-State-Machines.pdf
+
+The state-action machine defines its behaviour in steps. Each step is a
+transition from a control state s_i to the control state s_i+1 caused by an
+action a_i.
+
+```
+s_i -> a_i -> s_i+1
+s_i+1 -> a_i+1 -> s_i+2
+```
+
+Given a control state s_i, `next(s_i)` returns the next action to execute.
+Actions are asynchronous, once the action resolves, we can use the action
+response to determine the next state to transition to as defined by the
+function `model(state, response)`.
+
+We can then loosely define a step as:
+```
+s_i+1 = model(s_i, await next(s_i)())
+```
+
+When there are no more actions returned by `next`, the state-action machine
+terminates, as in the DONE and FATAL control states.
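+
+Expressed as a minimal TypeScript sketch (the real implementation also handles
+retries, timeouts and logging, and the types below are illustrative rather
+than the actual Kibana types), the whole machine is a small loop:
+
+```
+type ControlState = { controlState: string };
+type Next = (state: ControlState) => (() => Promise<unknown>) | null;
+type Model = (state: ControlState, response: unknown) => ControlState;
+
+// Keep stepping until `next` returns no more actions (DONE or FATAL).
+async function stateActionMachine(
+  initialState: ControlState,
+  next: Next,
+  model: Model
+): Promise<ControlState> {
+  let state = initialState;
+  let nextAction = next(state);
+  while (nextAction != null) {
+    const response = await nextAction(); // run a_i
+    state = model(state, response); // s_i+1 = model(s_i, await next(s_i)())
+    nextAction = next(state);
+  }
+  return state; // terminal control state: DONE or FATAL
+}
+```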
+
+What follows is a list of all control states. For each control state the
+following is described:
+ - _next action_: the next action triggered by the current control state
+ - _new control state_: based on the action response, the possible new control states that the machine will transition to
+
+Since the algorithm runs once for each saved object index, the steps below
+always reference a single saved object index `.kibana`. When Kibana starts up,
+all the steps are also repeated for the `.kibana_task_manager` index, but this
+is left out of the description for brevity.
+
+## INIT
+### Next action
+`fetchIndices`
+
+Fetch the saved object indices, mappings and aliases to find the source index
+and determine whether we’re migrating from a legacy index or a v1 migrations
+index.
+
+### New control state
+1. If `.kibana` and the version-specific alias both exist and point to the
+same index, this version's migration has already been completed. Even so, the
+same version could have plugins enabled at any time that would introduce new
+transforms or mappings, so we still search for outdated documents.
+ → `OUTDATED_DOCUMENTS_SEARCH`
+
+2. If `.kibana` is pointing to an index that belongs to a later version of
+Kibana, e.g. a 7.11.0 instance finds the `.kibana` alias pointing to
+`.kibana_7.12.0_001`, fail the migration
+ → `FATAL`
+
+3. If the `.kibana` alias exists, we’re migrating from either a v1 or v2 index
+and the migration source index is the index the `.kibana` alias points to.
+ → `WAIT_FOR_YELLOW_SOURCE`
+
+4. If `.kibana` is a concrete index, we’re migrating from a legacy index
+ → `LEGACY_SET_WRITE_BLOCK`
+
+5. If there are no `.kibana` indices, this is a fresh deployment. Initialize a
+ new saved objects index
+ → `CREATE_NEW_TARGET`
+
+## CREATE_NEW_TARGET
+### Next action
+`createIndex`
+
+Create the target index. This operation is idempotent; if the index already exists, we wait until its status turns yellow.
+
+### New control state
+ → `MARK_VERSION_INDEX_READY`
+
+## LEGACY_SET_WRITE_BLOCK
+### Next action
+`setWriteBlock`
+
+Set a write block on the legacy index to prevent any older Kibana instances
+from writing to the index while the migration is in progress, which could
+cause lost acknowledged writes.
+
+This is the first of a series of `LEGACY_*` control states that will:
+ - reindex the concrete legacy `.kibana` index into a `.kibana_pre6.5.0_001` index
+ - delete the concrete `.kibana` _index_ so that we're able to create a `.kibana` _alias_
+
+### New control state
+1. If the write block was successfully added
+ → `LEGACY_CREATE_REINDEX_TARGET`
+2. If the write block failed because the index doesn't exist, it means another instance already completed the legacy pre-migration. Proceed to the next step.
+ → `LEGACY_CREATE_REINDEX_TARGET`
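+
+For reference, a write block can be added with the index blocks API. A minimal
+sketch using the Elasticsearch JS client (node URL and index name
+illustrative); later sketches in this document reuse this `client`:
+
+```
+import { Client } from '@elastic/elasticsearch';
+
+const client = new Client({ node: 'http://localhost:9200' });
+
+// Adding a block is idempotent, so retrying after an interruption is safe.
+await client.indices.addBlock({ index: '.kibana_1', block: 'write' });
+```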
+
+## LEGACY_CREATE_REINDEX_TARGET
+### Next action
+`createIndex`
+
+Create a new `.kibana_pre6.5.0_001` index into which we can reindex the legacy
+index. (Since the task manager index was converted from a data index into a
+saved objects index in 7.4, it will be reindexed into `.kibana_pre7.4.0_001`.)
+### New control state
+ → `LEGACY_REINDEX`
+
+## LEGACY_REINDEX
+### Next action
+`reindex`
+
+Let Elasticsearch reindex the legacy index into `.kibana_pre6.5.0_001`. (For
+the task manager index we specify a `preMigrationScript` to convert the
+original task manager documents into valid saved objects)
+### New control state
+ → `LEGACY_REINDEX_WAIT_FOR_TASK`
+
+
+## LEGACY_REINDEX_WAIT_FOR_TASK
+### Next action
+`waitForReindexTask`
+
+Wait for up to 60s for the reindex task to complete.
+### New control state
+1. If the reindex task completed
+ → `LEGACY_DELETE`
+2. If the reindex task failed with a `target_index_had_write_block` or
+ `index_not_found_exception`, another instance already completed this step
+ → `LEGACY_DELETE`
+3. If the reindex task is still in progress
+ → `LEGACY_REINDEX_WAIT_FOR_TASK`
+
+## LEGACY_DELETE
+### Next action
+`updateAliases`
+
+Use the updateAliases API to atomically remove the legacy index and create a
+new `.kibana` alias that points to `.kibana_pre6.5.0_001`.
+### New control state
+1. If the action succeeds
+ → `SET_SOURCE_WRITE_BLOCK`
+2. If the action fails with `remove_index_not_a_concrete_index` or
+ `index_not_found_exception`, another instance has already completed this step.
+ → `SET_SOURCE_WRITE_BLOCK`
+
+## WAIT_FOR_YELLOW_SOURCE
+### Next action
+`waitForIndexStatusYellow`
+
+Wait for the source index to reach a "yellow" status: the index's primary
+shard is allocated and the index is ready for searching/indexing documents,
+even though Elasticsearch wasn't able to allocate the replicas.
+We don't have as much data redundancy as we could have, but it's enough to start the migration.
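+
+A sketch of this wait, reusing the `client` from the earlier sketch (timeout
+value illustrative):
+
+```
+// Resolves once the index has an allocated primary shard (yellow or better),
+// or rejects after the timeout.
+await client.cluster.health({
+  index: '.kibana_1',
+  wait_for_status: 'yellow',
+  timeout: '30s',
+});
+```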
+
+### New control state
+ → `SET_SOURCE_WRITE_BLOCK`
+
+## SET_SOURCE_WRITE_BLOCK
+### Next action
+`setWriteBlock`
+
+Set a write block on the source index to prevent any older Kibana instances from writing to the index while the migration is in progress, which could cause lost acknowledged writes.
+
+### New control state
+ → `CREATE_REINDEX_TEMP`
+
+## CREATE_REINDEX_TEMP
+### Next action
+`createIndex`
+
+This operation is idempotent; if the index already exists, we wait until its status turns yellow.
+
+- Because we will be transforming documents before writing them into this index, we can already set the mappings to the target mappings for this version. The source index might contain documents belonging to a disabled plugin. So set `dynamic: false` mappings for any unknown saved object types.
+- (Since we never query the temporary index we can potentially disable refresh to speed up indexing performance. Profile to see if gains justify complexity)
+
+### New control state
+ → `REINDEX_SOURCE_TO_TEMP_OPEN_PIT`
+
+## REINDEX_SOURCE_TO_TEMP_OPEN_PIT
+### Next action
+`openPIT`
+
+Open a PIT. Since there is a write block on the source index, there is basically no overhead to keeping the PIT, so we can lean towards a larger `keep_alive` value like 10 minutes.
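+
+A sketch of opening the PIT (index name and `keep_alive` illustrative):
+
+```
+const { body: pit } = await client.openPointInTime({
+  index: '.kibana_1',
+  keep_alive: '10m',
+});
+const pitId: string = pit.id;
+```
+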
+### New control state
+ → `REINDEX_SOURCE_TO_TEMP_READ`
+
+## REINDEX_SOURCE_TO_TEMP_READ
+### Next action
+`readNextBatchOfSourceDocuments`
+
+Read the next batch of outdated documents from the source index by using `search_after` with our PIT.
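+
+A sketch of one read, assuming `pitId` from the previous step and a
+`searchAfter` cursor carried over from the last batch (undefined on the first
+read); batch size is illustrative:
+
+```
+const { body } = await client.search({
+  body: {
+    // A PIT search must not specify an index; the PIT pins it.
+    pit: { id: pitId, keep_alive: '10m' },
+    size: 1000,
+    sort: ['_shard_doc'],
+    ...(searchAfter != null && { search_after: searchAfter }),
+  },
+});
+const hits = body.hits.hits;
+// The sort values of the last hit become `search_after` for the next batch;
+// an empty batch means we are done reading.
+const nextSearchAfter = hits.length > 0 ? hits[hits.length - 1].sort : undefined;
+```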
+
+### New control state
+1. If the batch contained > 0 documents
+ → `REINDEX_SOURCE_TO_TEMP_INDEX`
+2. If there are no more documents returned
+ → `REINDEX_SOURCE_TO_TEMP_CLOSE_PIT`
+
+## REINDEX_SOURCE_TO_TEMP_INDEX
+### Next action
+`transformRawDocs`
+
+Transform the current batch of documents
+
+In order to support sharing saved objects to multiple spaces in 8.0, the
+transforms will also regenerate document `_id`s. To ensure that this step
+remains idempotent, the new `_id` is deterministically generated using UUIDv5,
+ensuring that each Kibana instance generates the same new `_id` for the same document.
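+
+A sketch of deterministic `_id` generation (the namespace constant and key
+format are illustrative, not the actual Kibana scheme):
+
+```
+import { v5 as uuidv5 } from 'uuid';
+
+// Any fixed namespace UUID works, as long as every instance uses the same one.
+const ID_NAMESPACE = 'c74f8a34-0000-4000-8000-000000000000';
+
+// The same (space, type, oldId) triple yields the same new _id on every instance.
+const deterministicId = (spaceId: string, type: string, oldId: string) =>
+  uuidv5(`${spaceId}:${type}:${oldId}`, ID_NAMESPACE);
+```
+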
+### New control state
+ → `REINDEX_SOURCE_TO_TEMP_INDEX_BULK`
+## REINDEX_SOURCE_TO_TEMP_INDEX_BULK
+### Next action
+`bulkIndexTransformedDocuments`
+
+Use the bulk API create action to write a batch of up-to-date documents. The
+create action ensures that there will be only one write per reindexed document
+even if multiple Kibana instances are performing this step. Use
+`refresh=false` to speed up the create actions; the `UPDATE_TARGET_MAPPINGS`
+step will ensure that the index is refreshed before we start serving traffic.
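+
+A sketch of the bulk create, assuming `transformedDocs` holds the transformed
+batch (index name illustrative):
+
+```
+await client.bulk({
+  refresh: false,
+  body: transformedDocs.flatMap((doc) => [
+    // `create` (unlike `index`) fails with a version conflict when the
+    // document already exists, giving us at most one write per document.
+    { create: { _index: '.kibana_7.11.0_reindex_temp', _id: doc._id } },
+    doc._source,
+  ]),
+});
+```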
+
+The following errors are ignored because it means another instance already
+completed this step:
+ - documents already exist in the temp index
+ - temp index has a write block
+ - temp index is not found
+### New control state
+ → `REINDEX_SOURCE_TO_TEMP_READ`
+
+## REINDEX_SOURCE_TO_TEMP_CLOSE_PIT
+### Next action
+`closePIT`
+
+### New control state
+ → `SET_TEMP_WRITE_BLOCK`
+
+## SET_TEMP_WRITE_BLOCK
+### Next action
+`setWriteBlock`
+
+Set a write block on the temporary index so that we can clone it.
+### New control state
+ → `CLONE_TEMP_TO_TARGET`
+
+## CLONE_TEMP_TO_TARGET
+### Next action
+`cloneIndex`
+
+Ask Elasticsearch to clone the temporary index into the target index. If the target index already exists (because another node already started the clone operation), wait until the clone is complete by waiting for a yellow index status.
+
+We can’t use the temporary index as our target index because one instance can complete the migration, delete a document, and then a second instance starts the reindex operation and re-creates the deleted document. By cloning the temporary index and only accepting writes/deletes from the cloned target index, we prevent lost acknowledged deletes.
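+
+A sketch of the clone call (index names illustrative):
+
+```
+// Cloning requires the source index to have a write block, which was set in
+// the previous step.
+await client.indices.clone({
+  index: '.kibana_7.11.0_reindex_temp',
+  target: '.kibana_7.11.0_001',
+  wait_for_active_shards: 'all',
+});
+```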
+
+### New control state
+ → `OUTDATED_DOCUMENTS_SEARCH`
+
+## OUTDATED_DOCUMENTS_SEARCH
+### Next action
+`searchForOutdatedDocuments`
+
+Search for outdated saved object documents. This returns one batch of
+documents.
+
+If another instance has a disabled plugin, it will reindex that plugin's
+documents without transforming them. Because this instance doesn't know which
+plugins were disabled by the instance that performed the
+`REINDEX_SOURCE_TO_TEMP_INDEX` step, we need to search for outdated documents
+and transform them to ensure that everything is up to date.
+
+### New control state
+1. Found outdated documents?
+ → `OUTDATED_DOCUMENTS_TRANSFORM`
+2. All documents up to date
+ → `UPDATE_TARGET_MAPPINGS`
+
+## OUTDATED_DOCUMENTS_TRANSFORM
+### Next action
+`transformRawDocs` + `bulkOverwriteTransformedDocuments`
+
+Once transformed, we use an index operation to overwrite the outdated document with the up-to-date version. Optimistic concurrency control ensures that we only overwrite the document once, so that any updates/writes by another instance which already completed the migration aren’t overwritten and lost.
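+
+A sketch of one such overwrite, assuming `doc` was returned by the previous
+search (which must request `seq_no_primary_term`) and `transform` applies the
+registered migrations:
+
+```
+await client.index({
+  index: '.kibana_7.11.0_001',
+  id: doc._id,
+  // The write only succeeds if the document hasn't changed since we read it.
+  if_seq_no: doc._seq_no,
+  if_primary_term: doc._primary_term,
+  body: transform(doc._source),
+});
+```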
+
+### New control state
+ → `OUTDATED_DOCUMENTS_SEARCH`
+
+## UPDATE_TARGET_MAPPINGS
+### Next action
+`updateAndPickupMappings`
+
+If another instance has some plugins disabled, it will have disabled the mappings of those plugins' types when creating the temporary index. This action will
+update the mappings and then use an update_by_query to ensure that all fields are “picked up” and ready to be searched over.
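+
+A sketch of the two calls behind `updateAndPickupMappings`, assuming
+`targetMappings` holds the mappings for all registered types:
+
+```
+// 1. Update the mappings to include all registered types.
+await client.indices.putMapping({
+  index: '.kibana_7.11.0_001',
+  body: targetMappings,
+});
+
+// 2. "Pick up" existing documents so the new fields become searchable.
+const { body } = await client.updateByQuery({
+  index: '.kibana_7.11.0_001',
+  conflicts: 'proceed',
+  wait_for_completion: false, // returns a task that the next step polls
+});
+const pickupTaskId = body.task;
+```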
+
+### New control state
+ → `UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK`
+
+## UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK
+### Next action
+`updateAliases`
+
+Atomically apply the `versionIndexReadyActions` using the `_aliases` actions API; a sketch follows the list below. By performing the following actions we guarantee that if multiple versions of Kibana started the upgrade in parallel, only one version will succeed.
+
+1. Verify that the current alias is still pointing to the source index.
+2. Point the version alias and the current alias to the target index.
+3. Remove the temporary index.
+
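+A sketch of the atomic alias swap (index names illustrative):
+
+```
+await client.indices.updateAliases({
+  body: {
+    actions: [
+      // (1) fails the whole request with alias_not_found_exception when
+      // `.kibana` no longer points at the source index.
+      { remove: { index: '.kibana_1', alias: '.kibana', must_exist: true } },
+      { add: { index: '.kibana_7.11.0_001', alias: '.kibana' } },
+      { add: { index: '.kibana_7.11.0_001', alias: '.kibana_7.11.0' } },
+      // (3) fails with index_not_found_exception when the temporary index
+      // was already removed by another instance.
+      { remove_index: { index: '.kibana_7.11.0_reindex_temp' } },
+    ],
+  },
+});
+```
+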
+### New control state
+1. If all the actions succeed, we’re ready to serve traffic
+ → `DONE`
+2. If action (1) fails with `alias_not_found_exception` or action (3) fails with `index_not_found_exception`, another instance already completed the migration
+ → `MARK_VERSION_INDEX_READY_CONFLICT`
+
+## MARK_VERSION_INDEX_READY_CONFLICT
+### Next action
+`fetchIndices`
+
+Fetch the saved object indices.
+
+### New control state
+If another instance completed a migration from the same source, we need to verify that it is running the same version.
+
+1. If the current and version aliases are pointing to the same index, the instance that completed the migration was on the same version and it’s safe to start serving traffic.
+ → `DONE`
+2. If the other instance was running a different version, we fail the migration. Once we restart, one of two things can happen: the other instance is an older version and we will restart the migration, or it’s a newer version and we will refuse to start up.
+ → `FATAL`
+
+# Manual QA Test Plan
+## 1. Legacy pre-migration
When upgrading from a legacy index additional steps are required before the
regular migration process can start.
@@ -45,7 +447,7 @@ Test plan:
get restarted. Given enough time, it should always be able to
successfully complete the migration.
-For a successful migration the following behaviour should be observed:
+For a successful migration the following behaviour should be observed:
1. The `.kibana` index should be reindexed into a `.kibana_pre6.5.0` index
2. The `.kibana` index should be deleted
3. The `.kibana_index_template` should be deleted
@@ -54,12 +456,12 @@ For a successful migration the following behaviour should be observed:
6. Once migration has completed, the `.kibana_current` and `.kibana_7.11.0`
aliases should point to the `.kibana_7.11.0_001` index.
-### 2. Plugins enabled/disabled
+## 2. Plugins enabled/disabled
Kibana plugins can be disabled/enabled at any point in time. We need to ensure
that Saved Object documents are migrated for all the possible sequences of
enabling, disabling, before or after a version upgrade.
-#### Test scenario 1 (enable a plugin after migration):
+### Test scenario 1 (enable a plugin after migration):
1. Start an old version of Kibana (< 7.11)
2. Create a document that we know will be migrated in a later version (i.e.
create a `dashboard`)
@@ -70,7 +472,7 @@ enabling, disabling, before or after a version upgrade.
7. Ensure that the document from step (2) has been migrated
(`migrationVersion` contains 7.11.0)
-#### Test scenario 2 (disable a plugin after migration):
+### Test scenario 2 (disable a plugin after migration):
1. Start an old version of Kibana (< 7.11)
2. Create a document that we know will be migrated in a later version (i.e.
create a `dashboard`)
@@ -80,11 +482,11 @@ enabling, disabling, before or after a version upgrade.
7. Ensure that Kibana logs a warning, but continues to start even though there
are saved object documents which don't belong to an enabled plugin
-#### Test scenario 2 (multiple instances, enable a plugin after migration):
+### Test scenario 3 (multiple instances, enable a plugin after migration):
Follow the steps from 'Test scenario 1', but perform the migration with
multiple instances of Kibana
-#### Test scenario 3 (multiple instances, mixed plugin enabled configs):
+### Test scenario 4 (multiple instances, mixed plugin enabled configs):
We don't support this upgrade scenario, but it's worth making sure we don't
have data loss when there's a user error.
1. Start an old version of Kibana (< 7.11)
@@ -97,4 +499,3 @@ have data loss when there's a user error.
5. Ensure that the document from step (2) has been migrated
(`migrationVersion` contains 7.11.0)
-###
\ No newline at end of file
diff --git a/src/core/server/saved_objects/migrationsv2/actions/bulk_overwrite_transformed_documents.ts b/src/core/server/saved_objects/migrationsv2/actions/bulk_overwrite_transformed_documents.ts
index d0259f8f21ca4..4217ca599297a 100644
--- a/src/core/server/saved_objects/migrationsv2/actions/bulk_overwrite_transformed_documents.ts
+++ b/src/core/server/saved_objects/migrationsv2/actions/bulk_overwrite_transformed_documents.ts
@@ -15,9 +15,13 @@ import {
catchRetryableEsClientErrors,
RetryableEsClientError,
} from './catch_retryable_es_client_errors';
-import { isWriteBlockException } from './es_errors';
+import { isWriteBlockException, isIndexNotFoundException } from './es_errors';
import { WAIT_FOR_ALL_SHARDS_TO_BE_ACTIVE } from './constants';
-import type { TargetIndexHadWriteBlock, RequestEntityTooLargeException } from './index';
+import type {
+ TargetIndexHadWriteBlock,
+ RequestEntityTooLargeException,
+ IndexNotFound,
+} from './index';
/** @internal */
export interface BulkOverwriteTransformedDocumentsParams {
@@ -37,7 +41,10 @@ export const bulkOverwriteTransformedDocuments = ({
transformedDocs,
refresh = false,
}: BulkOverwriteTransformedDocumentsParams): TaskEither.TaskEither<
- RetryableEsClientError | TargetIndexHadWriteBlock | RequestEntityTooLargeException,
+ | RetryableEsClientError
+ | TargetIndexHadWriteBlock
+ | IndexNotFound
+ | RequestEntityTooLargeException,
'bulk_index_succeeded'
> => () => {
return client
@@ -87,6 +94,12 @@ export const bulkOverwriteTransformedDocuments = ({
type: 'target_index_had_write_block' as const,
});
}
+ if (errors.every(isIndexNotFoundException)) {
+ return Either.left({
+ type: 'index_not_found_exception' as const,
+ index,
+ });
+ }
throw new Error(JSON.stringify(errors));
}
})
diff --git a/src/core/server/saved_objects/migrationsv2/actions/es_errors.ts b/src/core/server/saved_objects/migrationsv2/actions/es_errors.ts
index 0d3c9fe3741aa..49b996bb118d8 100644
--- a/src/core/server/saved_objects/migrationsv2/actions/es_errors.ts
+++ b/src/core/server/saved_objects/migrationsv2/actions/es_errors.ts
@@ -21,3 +21,7 @@ export const isWriteBlockException = ({ type, reason }: EsErrorCause): boolean =
export const isIncompatibleMappingException = ({ type }: EsErrorCause): boolean => {
return type === 'strict_dynamic_mapping_exception' || type === 'mapper_parsing_exception';
};
+
+export const isIndexNotFoundException = ({ type }: EsErrorCause): boolean => {
+ return type === 'index_not_found_exception';
+};
diff --git a/src/core/server/saved_objects/migrationsv2/model/model.test.ts b/src/core/server/saved_objects/migrationsv2/model/model.test.ts
index 30612b82d58aa..f24d175f416a7 100644
--- a/src/core/server/saved_objects/migrationsv2/model/model.test.ts
+++ b/src/core/server/saved_objects/migrationsv2/model/model.test.ts
@@ -1154,6 +1154,16 @@ describe('migrations v2 model', () => {
expect(newState.retryCount).toEqual(0);
expect(newState.retryDelay).toEqual(0);
});
+ test('REINDEX_SOURCE_TO_TEMP_INDEX_BULK -> REINDEX_SOURCE_TO_TEMP_CLOSE_PIT if response is left index_not_found_exception', () => {
+ const res: ResponseType<'REINDEX_SOURCE_TO_TEMP_INDEX_BULK'> = Either.left({
+ type: 'index_not_found_exception',
+ index: 'the_temp_index',
+ });
+ const newState = model(reindexSourceToTempIndexBulkState, res);
+ expect(newState.controlState).toEqual('REINDEX_SOURCE_TO_TEMP_CLOSE_PIT');
+ expect(newState.retryCount).toEqual(0);
+ expect(newState.retryDelay).toEqual(0);
+ });
test('REINDEX_SOURCE_TO_TEMP_INDEX_BULK -> FATAL if action returns left request_entity_too_large_exception', () => {
const res: ResponseType<'REINDEX_SOURCE_TO_TEMP_INDEX_BULK'> = Either.left({
type: 'request_entity_too_large_exception',
@@ -1529,18 +1539,28 @@ describe('migrations v2 model', () => {
hasTransformedDocs: false,
progress: createInitialProgress(),
};
- test('TRANSFORMED_DOCUMENTS_BULK_INDEX should throw a throwBadResponse error if action failed', () => {
+
+ test('TRANSFORMED_DOCUMENTS_BULK_INDEX throws if action returns left index_not_found_exception', () => {
const res: ResponseType<'TRANSFORMED_DOCUMENTS_BULK_INDEX'> = Either.left({
- type: 'retryable_es_client_error',
- message: 'random documents bulk index error',
+ type: 'index_not_found_exception',
+ index: 'the_target_index',
});
- const newState = model(
- transformedDocumentsBulkIndexState,
- res
- ) as TransformedDocumentsBulkIndex;
- expect(newState.controlState).toEqual('TRANSFORMED_DOCUMENTS_BULK_INDEX');
- expect(newState.retryCount).toEqual(1);
- expect(newState.retryDelay).toEqual(2000);
+ expect(() =>
+ model(transformedDocumentsBulkIndexState, res)
+ ).toThrowErrorMatchingInlineSnapshot(
+ `"TRANSFORMED_DOCUMENTS_BULK_INDEX received unexpected action response: {\\"type\\":\\"index_not_found_exception\\",\\"index\\":\\"the_target_index\\"}"`
+ );
+ });
+
+ test('TRANSFORMED_DOCUMENTS_BULK_INDEX throws if action returns left target_index_had_write_block', () => {
+ const res: ResponseType<'TRANSFORMED_DOCUMENTS_BULK_INDEX'> = Either.left({
+ type: 'target_index_had_write_block',
+ });
+ expect(() =>
+ model(transformedDocumentsBulkIndexState, res)
+ ).toThrowErrorMatchingInlineSnapshot(
+ `"TRANSFORMED_DOCUMENTS_BULK_INDEX received unexpected action response: {\\"type\\":\\"target_index_had_write_block\\"}"`
+ );
});
test('TRANSFORMED_DOCUMENTS_BULK_INDEX -> FATAL if action returns left request_entity_too_large_exception', () => {
diff --git a/src/core/server/saved_objects/migrationsv2/model/model.ts b/src/core/server/saved_objects/migrationsv2/model/model.ts
index 01c1893154c6c..50be4a524f5c5 100644
--- a/src/core/server/saved_objects/migrationsv2/model/model.ts
+++ b/src/core/server/saved_objects/migrationsv2/model/model.ts
@@ -533,9 +533,13 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
transformErrors: [],
};
} else {
- if (isLeftTypeof(res.left, 'target_index_had_write_block')) {
- // the temp index has a write block, meaning that another instance already finished and moved forward.
- // close the PIT search and carry on with the happy path.
+ if (
+ isLeftTypeof(res.left, 'target_index_had_write_block') ||
+ isLeftTypeof(res.left, 'index_not_found_exception')
+ ) {
+ // When the temp index has a write block or has been deleted another
+ // instance already completed this step. Close the PIT search and carry
+ // on with the happy path.
return {
...stateP,
controlState: 'REINDEX_SOURCE_TO_TEMP_CLOSE_PIT',
@@ -721,9 +725,13 @@ export const model = (currentState: State, resW: ResponseType):
controlState: 'FATAL',
reason: `While indexing a batch of saved objects, Elasticsearch returned a 413 Request Entity Too Large exception. Try to use smaller batches by changing the Kibana 'migrations.batchSize' configuration option and restarting Kibana.`,
};
- } else if (isLeftTypeof(res.left, 'target_index_had_write_block')) {
- // we fail on this error since the target index will only have a write
- // block if a newer version of Kibana started an upgrade
+ } else if (
+ isLeftTypeof(res.left, 'target_index_had_write_block') ||
+ isLeftTypeof(res.left, 'index_not_found_exception')
+ ) {
+ // we fail on these errors since the target index will never get
+ // deleted and should only have a write block if a newer version of
+ // Kibana started an upgrade
throwBadResponse(stateP, res.left as never);
} else {
throwBadResponse(stateP, res.left);
diff --git a/src/core/server/saved_objects/service/lib/point_in_time_finder.test.ts b/src/core/server/saved_objects/service/lib/point_in_time_finder.test.ts
index 044bb45269538..160852f9160b7 100644
--- a/src/core/server/saved_objects/service/lib/point_in_time_finder.test.ts
+++ b/src/core/server/saved_objects/service/lib/point_in_time_finder.test.ts
@@ -7,7 +7,6 @@
*/
import { loggerMock, MockedLogger } from '../../../logging/logger.mock';
-import type { SavedObjectsClientContract } from '../../types';
import type { SavedObjectsFindResult } from '../';
import { savedObjectsRepositoryMock } from './repository.mock';
@@ -43,37 +42,67 @@ const mockHits = [
describe('createPointInTimeFinder()', () => {
let logger: MockedLogger;
- let find: jest.Mocked<SavedObjectsClientContract>['find'];
- let openPointInTimeForType: jest.Mocked<SavedObjectsClientContract>['openPointInTimeForType'];
- let closePointInTime: jest.Mocked<SavedObjectsClientContract>['closePointInTime'];
+ let repository: ReturnType<typeof savedObjectsRepositoryMock.create>;
beforeEach(() => {
logger = loggerMock.create();
- const mockRepository = savedObjectsRepositoryMock.create();
- find = mockRepository.find;
- openPointInTimeForType = mockRepository.openPointInTimeForType;
- closePointInTime = mockRepository.closePointInTime;
+ repository = savedObjectsRepositoryMock.create();
});
describe('#find', () => {
- test('throws if a PIT is already open', async () => {
- openPointInTimeForType.mockResolvedValueOnce({
+ test('opens a PIT with the correct parameters', async () => {
+ repository.openPointInTimeForType.mockResolvedValueOnce({
id: 'abc123',
});
- find.mockResolvedValueOnce({
+ repository.find.mockResolvedValue({
total: 2,
saved_objects: mockHits,
pit_id: 'abc123',
per_page: 1,
page: 0,
});
- find.mockResolvedValueOnce({
- total: 2,
- saved_objects: mockHits,
- pit_id: 'abc123',
- per_page: 1,
- page: 1,
+
+ const findOptions: SavedObjectsCreatePointInTimeFinderOptions = {
+ type: ['visualization'],
+ search: 'foo*',
+ perPage: 1,
+ namespaces: ['ns1', 'ns2'],
+ };
+
+ const finder = new PointInTimeFinder(findOptions, {
+ logger,
+ client: repository,
+ });
+
+ expect(repository.openPointInTimeForType).not.toHaveBeenCalled();
+
+ await finder.find().next();
+
+ expect(repository.openPointInTimeForType).toHaveBeenCalledTimes(1);
+ expect(repository.openPointInTimeForType).toHaveBeenCalledWith(findOptions.type, {
+ namespaces: findOptions.namespaces,
});
+ });
+
+ test('throws if a PIT is already open', async () => {
+ repository.openPointInTimeForType.mockResolvedValueOnce({
+ id: 'abc123',
+ });
+ repository.find
+ .mockResolvedValueOnce({
+ total: 2,
+ saved_objects: mockHits,
+ pit_id: 'abc123',
+ per_page: 1,
+ page: 0,
+ })
+ .mockResolvedValueOnce({
+ total: 2,
+ saved_objects: mockHits,
+ pit_id: 'abc123',
+ per_page: 1,
+ page: 1,
+ });
const findOptions: SavedObjectsCreatePointInTimeFinderOptions = {
type: ['visualization'],
@@ -83,30 +112,25 @@ describe('createPointInTimeFinder()', () => {
const finder = new PointInTimeFinder(findOptions, {
logger,
- client: {
- find,
- openPointInTimeForType,
- closePointInTime,
- },
+ client: repository,
});
await finder.find().next();
- expect(find).toHaveBeenCalledTimes(1);
- find.mockClear();
+ expect(repository.find).toHaveBeenCalledTimes(1);
expect(async () => {
await finder.find().next();
}).rejects.toThrowErrorMatchingInlineSnapshot(
`"Point In Time has already been opened for this finder instance. Please call \`close()\` before calling \`find()\` again."`
);
- expect(find).toHaveBeenCalledTimes(0);
+ expect(repository.find).toHaveBeenCalledTimes(1);
});
test('works with a single page of results', async () => {
- openPointInTimeForType.mockResolvedValueOnce({
+ repository.openPointInTimeForType.mockResolvedValueOnce({
id: 'abc123',
});
- find.mockResolvedValueOnce({
+ repository.find.mockResolvedValueOnce({
total: 2,
saved_objects: mockHits,
pit_id: 'abc123',
@@ -121,11 +145,7 @@ describe('createPointInTimeFinder()', () => {
const finder = new PointInTimeFinder(findOptions, {
logger,
- client: {
- find,
- openPointInTimeForType,
- closePointInTime,
- },
+ client: repository,
});
const hits: SavedObjectsFindResult[] = [];
for await (const result of finder.find()) {
@@ -133,10 +153,10 @@ describe('createPointInTimeFinder()', () => {
}
expect(hits.length).toBe(2);
- expect(openPointInTimeForType).toHaveBeenCalledTimes(1);
- expect(closePointInTime).toHaveBeenCalledTimes(1);
- expect(find).toHaveBeenCalledTimes(1);
- expect(find).toHaveBeenCalledWith(
+ expect(repository.openPointInTimeForType).toHaveBeenCalledTimes(1);
+ expect(repository.closePointInTime).toHaveBeenCalledTimes(1);
+ expect(repository.find).toHaveBeenCalledTimes(1);
+ expect(repository.find).toHaveBeenCalledWith(
expect.objectContaining({
pit: expect.objectContaining({ id: 'abc123', keepAlive: '2m' }),
sortField: 'updated_at',
@@ -147,24 +167,25 @@ describe('createPointInTimeFinder()', () => {
});
test('works with multiple pages of results', async () => {
- openPointInTimeForType.mockResolvedValueOnce({
+ repository.openPointInTimeForType.mockResolvedValueOnce({
id: 'abc123',
});
- find.mockResolvedValueOnce({
- total: 2,
- saved_objects: [mockHits[0]],
- pit_id: 'abc123',
- per_page: 1,
- page: 0,
- });
- find.mockResolvedValueOnce({
- total: 2,
- saved_objects: [mockHits[1]],
- pit_id: 'abc123',
- per_page: 1,
- page: 0,
- });
- find.mockResolvedValueOnce({
+ repository.find
+ .mockResolvedValueOnce({
+ total: 2,
+ saved_objects: [mockHits[0]],
+ pit_id: 'abc123',
+ per_page: 1,
+ page: 0,
+ })
+ .mockResolvedValueOnce({
+ total: 2,
+ saved_objects: [mockHits[1]],
+ pit_id: 'abc123',
+ per_page: 1,
+ page: 0,
+ });
+ repository.find.mockResolvedValueOnce({
total: 2,
saved_objects: [],
per_page: 1,
@@ -180,11 +201,7 @@ describe('createPointInTimeFinder()', () => {
const finder = new PointInTimeFinder(findOptions, {
logger,
- client: {
- find,
- openPointInTimeForType,
- closePointInTime,
- },
+ client: repository,
});
const hits: SavedObjectsFindResult[] = [];
for await (const result of finder.find()) {
@@ -192,12 +209,12 @@ describe('createPointInTimeFinder()', () => {
}
expect(hits.length).toBe(2);
- expect(openPointInTimeForType).toHaveBeenCalledTimes(1);
- expect(closePointInTime).toHaveBeenCalledTimes(1);
+ expect(repository.openPointInTimeForType).toHaveBeenCalledTimes(1);
+ expect(repository.closePointInTime).toHaveBeenCalledTimes(1);
// called 3 times since we need a 3rd request to check if we
// are done paginating through results.
- expect(find).toHaveBeenCalledTimes(3);
- expect(find).toHaveBeenCalledWith(
+ expect(repository.find).toHaveBeenCalledTimes(3);
+ expect(repository.find).toHaveBeenCalledWith(
expect.objectContaining({
pit: expect.objectContaining({ id: 'abc123', keepAlive: '2m' }),
sortField: 'updated_at',
@@ -210,10 +227,10 @@ describe('createPointInTimeFinder()', () => {
describe('#close', () => {
test('calls closePointInTime with correct ID', async () => {
- openPointInTimeForType.mockResolvedValueOnce({
+ repository.openPointInTimeForType.mockResolvedValueOnce({
id: 'test',
});
- find.mockResolvedValueOnce({
+ repository.find.mockResolvedValueOnce({
total: 1,
saved_objects: [mockHits[0]],
pit_id: 'test',
@@ -229,11 +246,7 @@ describe('createPointInTimeFinder()', () => {
const finder = new PointInTimeFinder(findOptions, {
logger,
- client: {
- find,
- openPointInTimeForType,
- closePointInTime,
- },
+ client: repository,
});
const hits: SavedObjectsFindResult[] = [];
for await (const result of finder.find()) {
@@ -241,28 +254,28 @@ describe('createPointInTimeFinder()', () => {
await finder.close();
}
- expect(closePointInTime).toHaveBeenCalledWith('test');
+ expect(repository.closePointInTime).toHaveBeenCalledWith('test');
});
test('causes generator to stop', async () => {
- openPointInTimeForType.mockResolvedValueOnce({
+ repository.openPointInTimeForType.mockResolvedValueOnce({
id: 'test',
});
- find.mockResolvedValueOnce({
+ repository.find.mockResolvedValueOnce({
total: 2,
saved_objects: [mockHits[0]],
pit_id: 'test',
per_page: 1,
page: 0,
});
- find.mockResolvedValueOnce({
+ repository.find.mockResolvedValueOnce({
total: 2,
saved_objects: [mockHits[1]],
pit_id: 'test',
per_page: 1,
page: 0,
});
- find.mockResolvedValueOnce({
+ repository.find.mockResolvedValueOnce({
total: 2,
saved_objects: [],
per_page: 1,
@@ -278,11 +291,7 @@ describe('createPointInTimeFinder()', () => {
const finder = new PointInTimeFinder(findOptions, {
logger,
- client: {
- find,
- openPointInTimeForType,
- closePointInTime,
- },
+ client: repository,
});
const hits: SavedObjectsFindResult[] = [];
for await (const result of finder.find()) {
@@ -290,15 +299,15 @@ describe('createPointInTimeFinder()', () => {
await finder.close();
}
- expect(closePointInTime).toHaveBeenCalledTimes(1);
+ expect(repository.closePointInTime).toHaveBeenCalledTimes(1);
expect(hits.length).toBe(1);
});
test('is called if `find` throws an error', async () => {
- openPointInTimeForType.mockResolvedValueOnce({
+ repository.openPointInTimeForType.mockResolvedValueOnce({
id: 'test',
});
- find.mockRejectedValueOnce(new Error('oops'));
+ repository.find.mockRejectedValueOnce(new Error('oops'));
const findOptions: SavedObjectsCreatePointInTimeFinderOptions = {
type: ['visualization'],
@@ -308,11 +317,7 @@ describe('createPointInTimeFinder()', () => {
const finder = new PointInTimeFinder(findOptions, {
logger,
- client: {
- find,
- openPointInTimeForType,
- closePointInTime,
- },
+ client: repository,
});
const hits: SavedObjectsFindResult[] = [];
try {
@@ -323,27 +328,28 @@ describe('createPointInTimeFinder()', () => {
// intentionally empty
}
- expect(closePointInTime).toHaveBeenCalledWith('test');
+ expect(repository.closePointInTime).toHaveBeenCalledWith('test');
});
test('finder can be reused after closing', async () => {
- openPointInTimeForType.mockResolvedValueOnce({
+ repository.openPointInTimeForType.mockResolvedValueOnce({
id: 'abc123',
});
- find.mockResolvedValueOnce({
- total: 2,
- saved_objects: mockHits,
- pit_id: 'abc123',
- per_page: 1,
- page: 0,
- });
- find.mockResolvedValueOnce({
- total: 2,
- saved_objects: mockHits,
- pit_id: 'abc123',
- per_page: 1,
- page: 1,
- });
+ repository.find
+ .mockResolvedValueOnce({
+ total: 2,
+ saved_objects: mockHits,
+ pit_id: 'abc123',
+ per_page: 1,
+ page: 0,
+ })
+ .mockResolvedValueOnce({
+ total: 2,
+ saved_objects: mockHits,
+ pit_id: 'abc123',
+ per_page: 1,
+ page: 1,
+ });
const findOptions: SavedObjectsCreatePointInTimeFinderOptions = {
type: ['visualization'],
@@ -353,11 +359,7 @@ describe('createPointInTimeFinder()', () => {
const finder = new PointInTimeFinder(findOptions, {
logger,
- client: {
- find,
- openPointInTimeForType,
- closePointInTime,
- },
+ client: repository,
});
const findA = finder.find();
@@ -370,9 +372,9 @@ describe('createPointInTimeFinder()', () => {
expect((await findA.next()).done).toBe(true);
expect((await findB.next()).done).toBe(true);
- expect(openPointInTimeForType).toHaveBeenCalledTimes(2);
- expect(find).toHaveBeenCalledTimes(2);
- expect(closePointInTime).toHaveBeenCalledTimes(2);
+ expect(repository.openPointInTimeForType).toHaveBeenCalledTimes(2);
+ expect(repository.find).toHaveBeenCalledTimes(2);
+ expect(repository.closePointInTime).toHaveBeenCalledTimes(2);
});
});
});
diff --git a/src/core/server/saved_objects/service/lib/point_in_time_finder.ts b/src/core/server/saved_objects/service/lib/point_in_time_finder.ts
index f0ed943c585e5..d11be250ad0a9 100644
--- a/src/core/server/saved_objects/service/lib/point_in_time_finder.ts
+++ b/src/core/server/saved_objects/service/lib/point_in_time_finder.ts
@@ -139,7 +139,9 @@ export class PointInTimeFinder
private async open() {
try {
- const { id } = await this.#client.openPointInTimeForType(this.#findOptions.type);
+ const { id } = await this.#client.openPointInTimeForType(this.#findOptions.type, {
+ namespaces: this.#findOptions.namespaces,
+ });
this.#pitId = id;
this.#open = true;
} catch (e) {
diff --git a/src/core/server/saved_objects/service/saved_objects_client.ts b/src/core/server/saved_objects/service/saved_objects_client.ts
index 00d47d8d1fb03..1564df2969ecc 100644
--- a/src/core/server/saved_objects/service/saved_objects_client.ts
+++ b/src/core/server/saved_objects/service/saved_objects_client.ts
@@ -334,7 +334,7 @@ export interface SavedObjectsResolveResponse {
/**
* @public
*/
-export interface SavedObjectsOpenPointInTimeOptions extends SavedObjectsBaseOptions {
+export interface SavedObjectsOpenPointInTimeOptions {
/**
* Optionally specify how long ES should keep the PIT alive until the next request. Defaults to `5m`.
*/
@@ -343,6 +343,15 @@ export interface SavedObjectsOpenPointInTimeOptions extends SavedObjectsBaseOpti
* An optional ES preference value to be used for the query.
*/
preference?: string;
+ /**
+ * An optional list of namespaces to be used when opening the PIT.
+ *
+ * When the spaces plugin is enabled:
+ * - this will default to the user's current space (as determined by the URL)
+ * - if specified, the user's current space will be ignored
+ * - `['*']` will search across all available spaces
+ */
+ namespaces?: string[];
}
/**
diff --git a/src/core/server/server.api.md b/src/core/server/server.api.md
index 47455e0c14316..67b08f4c0d9b7 100644
--- a/src/core/server/server.api.md
+++ b/src/core/server/server.api.md
@@ -807,6 +807,7 @@ export type ElasticsearchClientConfig = Pick;
keepAlive?: boolean;
+ caFingerprint?: ClientOptions['caFingerprint'];
};
// @public
@@ -1003,6 +1004,7 @@ export interface HttpServerInfo {
// @public
export interface HttpServicePreboot {
basePath: IBasePath;
+ getServerInfo: () => HttpServerInfo;
registerRoutes(path: string, callback: (router: IRouter) => void): void;
}
@@ -2460,8 +2462,9 @@ export interface SavedObjectsMigrationVersion {
export type SavedObjectsNamespaceType = 'single' | 'multiple' | 'multiple-isolated' | 'agnostic';
// @public (undocumented)
-export interface SavedObjectsOpenPointInTimeOptions extends SavedObjectsBaseOptions {
+export interface SavedObjectsOpenPointInTimeOptions {
keepAlive?: string;
+ namespaces?: string[];
preference?: string;
}
diff --git a/src/dev/build/tasks/copy_source_task.ts b/src/dev/build/tasks/copy_source_task.ts
index e0ec0340904dc..dd9f331ac263d 100644
--- a/src/dev/build/tasks/copy_source_task.ts
+++ b/src/dev/build/tasks/copy_source_task.ts
@@ -31,6 +31,7 @@ export const CopySource: Task = {
'!src/dev/**',
'!src/setup_node_env/babel_register/index.js',
'!src/setup_node_env/babel_register/register.js',
+ '!**/jest.config.js',
'!src/plugins/telemetry/schema/**', // Skip telemetry schemas
'!**/public/**/*.{js,ts,tsx,json,scss}',
'typings/**',
diff --git a/src/dev/typescript/projects.ts b/src/dev/typescript/projects.ts
index 0244cb2cd9115..e3d8185e73e55 100644
--- a/src/dev/typescript/projects.ts
+++ b/src/dev/typescript/projects.ts
@@ -70,6 +70,8 @@ export const PROJECTS = [
...findProjects('packages/*/tsconfig.json'),
...findProjects('src/plugins/*/tsconfig.json'),
+ ...findProjects('src/plugins/chart_expressions/*/tsconfig.json'),
+ ...findProjects('src/plugins/vis_types/*/tsconfig.json'),
...findProjects('x-pack/plugins/*/tsconfig.json'),
...findProjects('examples/*/tsconfig.json'),
...findProjects('x-pack/examples/*/tsconfig.json'),
diff --git a/src/plugins/chart_expressions/expression_tagcloud/.i18nrc.json b/src/plugins/chart_expressions/expression_tagcloud/.i18nrc.json
new file mode 100755
index 0000000000000..df4e39309f98e
--- /dev/null
+++ b/src/plugins/chart_expressions/expression_tagcloud/.i18nrc.json
@@ -0,0 +1,6 @@
+{
+ "prefix": "expressionTagcloud",
+ "paths": {
+ "expressionTagcloud": "."
+ }
+}
diff --git a/src/plugins/chart_expressions/expression_tagcloud/README.md b/src/plugins/chart_expressions/expression_tagcloud/README.md
new file mode 100755
index 0000000000000..ae7635ffe0173
--- /dev/null
+++ b/src/plugins/chart_expressions/expression_tagcloud/README.md
@@ -0,0 +1,9 @@
+# expressionTagcloud
+
+Expression Tagcloud plugin adds a `tagcloud` renderer and function to the expression plugin. The renderer will display the `Wordcloud` chart.
+
+---
+
+## Development
+
+See the [kibana contributing guide](https://github.com/elastic/kibana/blob/master/CONTRIBUTING.md) for instructions setting up your development environment.
diff --git a/src/plugins/chart_expressions/expression_tagcloud/common/constants.ts b/src/plugins/chart_expressions/expression_tagcloud/common/constants.ts
new file mode 100644
index 0000000000000..3d834448a94ef
--- /dev/null
+++ b/src/plugins/chart_expressions/expression_tagcloud/common/constants.ts
@@ -0,0 +1,12 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+export const PLUGIN_ID = 'expressionTagcloud';
+export const PLUGIN_NAME = 'expressionTagcloud';
+
+export const EXPRESSION_NAME = 'tagcloud';
diff --git a/src/plugins/vis_type_tagcloud/public/__snapshots__/tag_cloud_fn.test.ts.snap b/src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/__snapshots__/tagcloud_function.test.ts.snap
similarity index 98%
rename from src/plugins/vis_type_tagcloud/public/__snapshots__/tag_cloud_fn.test.ts.snap
rename to src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/__snapshots__/tagcloud_function.test.ts.snap
index 2888d7637546c..56b24f0ae004f 100644
--- a/src/plugins/vis_type_tagcloud/public/__snapshots__/tag_cloud_fn.test.ts.snap
+++ b/src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/__snapshots__/tagcloud_function.test.ts.snap
@@ -22,7 +22,7 @@ Object {
exports[`interpreter/functions#tagcloud returns an object with the correct structure 1`] = `
Object {
- "as": "tagloud_vis",
+ "as": "tagcloud",
"type": "render",
"value": Object {
"syncColors": false,
diff --git a/src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/index.ts b/src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/index.ts
new file mode 100644
index 0000000000000..5df32e3991edc
--- /dev/null
+++ b/src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/index.ts
@@ -0,0 +1,13 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+import { tagcloudFunction } from './tagcloud_function';
+
+export const functions = [tagcloudFunction];
+
+export { tagcloudFunction };
diff --git a/src/plugins/vis_type_tagcloud/public/tag_cloud_fn.test.ts b/src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/tagcloud_function.test.ts
similarity index 82%
rename from src/plugins/vis_type_tagcloud/public/tag_cloud_fn.test.ts
rename to src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/tagcloud_function.test.ts
index 1671c0b01a666..2c6e021b5107a 100644
--- a/src/plugins/vis_type_tagcloud/public/tag_cloud_fn.test.ts
+++ b/src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/tagcloud_function.test.ts
@@ -6,13 +6,13 @@
* Side Public License, v 1.
*/
-import { createTagCloudFn } from './tag_cloud_fn';
+import { tagcloudFunction } from './tagcloud_function';
-import { functionWrapper } from '../../expressions/common/expression_functions/specs/tests/utils';
-import { Datatable } from '../../expressions/common/expression_types/specs';
+import { functionWrapper } from '../../../../expressions/common/expression_functions/specs/tests/utils';
+import { Datatable } from '../../../../expressions/common/expression_types/specs';
describe('interpreter/functions#tagcloud', () => {
- const fn = functionWrapper(createTagCloudFn());
+ const fn = functionWrapper(tagcloudFunction());
const context = {
type: 'datatable',
rows: [{ 'col-0-1': 0 }],
diff --git a/src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/tagcloud_function.ts b/src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/tagcloud_function.ts
new file mode 100644
index 0000000000000..c3553c4660ce9
--- /dev/null
+++ b/src/plugins/chart_expressions/expression_tagcloud/common/expression_functions/tagcloud_function.ts
@@ -0,0 +1,164 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+import { i18n } from '@kbn/i18n';
+
+import { prepareLogTable, Dimension } from '../../../../visualizations/common/prepare_log_table';
+import { TagCloudVisParams } from '../types';
+import { ExpressionTagcloudFunction } from '../types';
+import { EXPRESSION_NAME } from '../constants';
+
+const strings = {
+ help: i18n.translate('expressionTagcloud.functions.tagcloudHelpText', {
+ defaultMessage: 'Tagcloud visualization.',
+ }),
+ args: {
+ scale: i18n.translate('expressionTagcloud.functions.tagcloud.args.scaleHelpText', {
+ defaultMessage: 'Scale to determine font size of a word',
+ }),
+ orientation: i18n.translate('expressionTagcloud.functions.tagcloud.args.orientationHelpText', {
+ defaultMessage: 'Orientation of words inside tagcloud',
+ }),
+ minFontSize: i18n.translate('expressionTagcloud.functions.tagcloud.args.minFontSizeHelpText', {
+ defaultMessage: 'Min font size',
+ }),
+ maxFontSize: i18n.translate('expressionTagcloud.functions.tagcloud.args.maxFontSizeHelpText', {
+ defaultMessage: 'Max font size',
+ }),
+ showLabel: i18n.translate('expressionTagcloud.functions.tagcloud.args.showLabelHelpText', {
+ defaultMessage: 'Show chart label',
+ }),
+ palette: i18n.translate('expressionTagcloud.functions.tagcloud.args.paletteHelpText', {
+ defaultMessage: 'Defines the chart palette name',
+ }),
+ metric: i18n.translate('expressionTagcloud.functions.tagcloud.args.metricHelpText', {
+ defaultMessage: 'metric dimension configuration',
+ }),
+ bucket: i18n.translate('expressionTagcloud.functions.tagcloud.args.bucketHelpText', {
+ defaultMessage: 'bucket dimension configuration',
+ }),
+ },
+ dimension: {
+ tags: i18n.translate('expressionTagcloud.functions.tagcloud.dimension.tags', {
+ defaultMessage: 'Tags',
+ }),
+ tagSize: i18n.translate('expressionTagcloud.functions.tagcloud.dimension.tagSize', {
+ defaultMessage: 'Tag size',
+ }),
+ },
+};
+
+export const errors = {
+ invalidPercent: (percent: number) =>
+ new Error(
+ i18n.translate('expressionTagcloud.functions.tagcloud.invalidPercentErrorMessage', {
+ defaultMessage: "Invalid value: '{percent}'. Percentage must be between 0 and 1",
+ values: {
+ percent,
+ },
+ })
+ ),
+ invalidImageUrl: (imageUrl: string) =>
+ new Error(
+ i18n.translate('expressionTagcloud.functions.tagcloud.invalidImageUrl', {
+ defaultMessage: "Invalid image url: '{imageUrl}'.",
+ values: {
+ imageUrl,
+ },
+ })
+ ),
+};
+
+export const tagcloudFunction: ExpressionTagcloudFunction = () => {
+ const { help, args: argHelp, dimension } = strings;
+
+ return {
+ name: EXPRESSION_NAME,
+ type: 'render',
+ inputTypes: ['datatable'],
+ help,
+ args: {
+ scale: {
+ types: ['string'],
+ default: 'linear',
+ options: ['linear', 'log', 'square root'],
+ help: argHelp.scale,
+ },
+ orientation: {
+ types: ['string'],
+ default: 'single',
+ options: ['single', 'right angled', 'multiple'],
+ help: argHelp.orientation,
+ },
+ minFontSize: {
+ types: ['number'],
+ default: 18,
+ help: argHelp.minFontSize,
+ },
+ maxFontSize: {
+ types: ['number'],
+ default: 72,
+ help: argHelp.maxFontSize,
+ },
+ showLabel: {
+ types: ['boolean'],
+ default: true,
+ help: argHelp.showLabel,
+ },
+ palette: {
+ types: ['string'],
+ help: argHelp.palette,
+ default: 'default',
+ },
+ metric: {
+ types: ['vis_dimension'],
+ help: argHelp.metric,
+ required: true,
+ },
+ bucket: {
+ types: ['vis_dimension'],
+ help: argHelp.bucket,
+ },
+ },
+ fn(input, args, handlers) {
+ const visParams = {
+ scale: args.scale,
+ orientation: args.orientation,
+ minFontSize: args.minFontSize,
+ maxFontSize: args.maxFontSize,
+ showLabel: args.showLabel,
+ metric: args.metric,
+ ...(args.bucket && {
+ bucket: args.bucket,
+ }),
+ palette: {
+ type: 'palette',
+ name: args.palette,
+ },
+ } as TagCloudVisParams;
+
+ if (handlers?.inspectorAdapters?.tables) {
+ const argsTable: Dimension[] = [[[args.metric], dimension.tagSize]];
+ if (args.bucket) {
+ argsTable.push([[args.bucket], dimension.tags]);
+ }
+ const logTable = prepareLogTable(input, argsTable);
+ handlers.inspectorAdapters.tables.logDatatable('default', logTable);
+ }
+ return {
+ type: 'render',
+ as: EXPRESSION_NAME,
+ value: {
+ visData: input,
+ visType: EXPRESSION_NAME,
+ visParams,
+ syncColors: handlers?.isSyncColorsEnabled?.() ?? false,
+ },
+ };
+ },
+ };
+};
diff --git a/packages/kbn-optimizer/babel.config.js b/src/plugins/chart_expressions/expression_tagcloud/common/index.ts
old mode 100644
new mode 100755
similarity index 78%
rename from packages/kbn-optimizer/babel.config.js
rename to src/plugins/chart_expressions/expression_tagcloud/common/index.ts
index e3a412717fb6e..d8989abcc3d6f
--- a/packages/kbn-optimizer/babel.config.js
+++ b/src/plugins/chart_expressions/expression_tagcloud/common/index.ts
@@ -6,7 +6,4 @@
* Side Public License, v 1.
*/
-module.exports = {
- presets: ['@kbn/babel-preset/node_preset'],
- ignore: ['**/*.test.js'],
-};
+export * from './constants';
diff --git a/src/plugins/chart_expressions/expression_tagcloud/common/types/expression_functions.ts b/src/plugins/chart_expressions/expression_tagcloud/common/types/expression_functions.ts
new file mode 100644
index 0000000000000..b1aba30380b59
--- /dev/null
+++ b/src/plugins/chart_expressions/expression_tagcloud/common/types/expression_functions.ts
@@ -0,0 +1,61 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+import { PaletteOutput } from '../../../../charts/common';
+import {
+ Datatable,
+ ExpressionFunctionDefinition,
+ ExpressionValueRender,
+ SerializedFieldFormat,
+} from '../../../../expressions';
+import { ExpressionValueVisDimension } from '../../../../visualizations/common';
+import { EXPRESSION_NAME } from '../constants';
+
+interface Dimension {
+ accessor: number;
+ format: {
+ id?: string;
+ params?: SerializedFieldFormat