Commit

Merge branch '7.9_Definitions_Update_For_Console' of github.com:cuff-links/kibana into 7.9_Definitions_Update_For_Console
John Dorlus committed Jul 31, 2020
2 parents 2ca8d67 + eb573df commit 62cb086
Showing 1,145 changed files with 37,220 additions and 12,321 deletions.
35 changes: 19 additions & 16 deletions .ci/Jenkinsfile_baseline_capture
@@ -4,22 +4,25 @@ library 'kibana-pipeline-library'
kibanaLibrary.load()

kibanaPipeline(timeoutMinutes: 120) {
ciStats.trackBuild {
catchError {
parallel([
'oss-visualRegression': {
workers.ci(name: 'oss-visualRegression', size: 's-highmem', ramDisk: true) {
kibanaPipeline.functionalTestProcess('oss-visualRegression', './test/scripts/jenkins_visual_regression.sh')(1)
}
},
'xpack-visualRegression': {
workers.ci(name: 'xpack-visualRegression', size: 's-highmem', ramDisk: true) {
kibanaPipeline.functionalTestProcess('xpack-visualRegression', './test/scripts/jenkins_xpack_visual_regression.sh')(1)
}
},
])
}
githubCommitStatus.trackBuild(params.commit, 'kibana-ci-baseline') {
ciStats.trackBuild {
catchError {
parallel([
'oss-visualRegression': {
workers.ci(name: 'oss-visualRegression', size: 's-highmem', ramDisk: true) {
kibanaPipeline.functionalTestProcess('oss-visualRegression', './test/scripts/jenkins_visual_regression.sh')(1)
}
},
'xpack-visualRegression': {
workers.ci(name: 'xpack-visualRegression', size: 's-highmem', ramDisk: true) {
kibanaPipeline.functionalTestProcess('xpack-visualRegression', './test/scripts/jenkins_xpack_visual_regression.sh')(1)
}
},
])
}

kibanaPipeline.sendMail()
kibanaPipeline.sendMail()
slackNotifications.onFailure()
}
}
}
6 changes: 6 additions & 0 deletions .ci/Jenkinsfile_baseline_trigger
@@ -16,6 +16,12 @@ kibanaLibrary.load()

withGithubCredentials {
branches.each { branch ->
if (branch == '6.8') {
// skip 6.8, it is tracked but we don't need snapshots for it and haven't backported
// the baseline capture scripts to it.
return;
}

stage(branch) {
def commits = getCommits(branch, MAXIMUM_COMMITS_TO_CHECK, MAXIMUM_COMMITS_TO_BUILD)

4 changes: 4 additions & 0 deletions .ci/pipeline-library/src/test/KibanaBasePipelineTest.groovy
@@ -78,6 +78,10 @@ class KibanaBasePipelineTest extends BasePipelineTest {
return helper.callStack.find { it.methodName == name }
}

def fnMocks(String name) {
helper.callStack.findAll { it.methodName == name }
}

void mockFailureBuild() {
props([
buildUtils: [
2 changes: 2 additions & 0 deletions .ci/pipeline-library/src/test/githubCommitStatus.groovy
@@ -12,6 +12,7 @@ class GithubCommitStatusTest extends KibanaBasePipelineTest {

interface BuildState {
Object get(String key)
Object has(String key)
}

interface GithubApi {
@@ -25,6 +26,7 @@ class GithubCommitStatusTest extends KibanaBasePipelineTest {
buildStateMock = mock(BuildState)
githubApiMock = mock(GithubApi)

when(buildStateMock.has('checkoutInfo')).thenReturn(true)
when(buildStateMock.get('checkoutInfo')).thenReturn([ commit: 'COMMIT_HASH', ])
when(githubApiMock.post(any(), any())).thenReturn(null)

63 changes: 63 additions & 0 deletions .ci/pipeline-library/src/test/slackNotifications.groovy
@@ -59,4 +59,67 @@ class SlackNotificationsTest extends KibanaBasePipelineTest {
args.blocks[2].text.text.toString()
)
}

@Test
void 'sendFailedBuild() should call slackSend() with a backup message when first attempt fails'() {
mockFailureBuild()
def counter = 0
helper.registerAllowedMethod('slackSend', [Map.class], { ++counter > 1 })
slackNotifications.sendFailedBuild()

def args = fnMocks('slackSend')[1].args[0]

def expected = [
channel: '#kibana-operations-alerts',
username: 'Kibana Operations',
iconEmoji: ':jenkins:',
color: 'danger',
message: ':broken_heart: elastic / kibana # master #1',
]

expected.each {
assertEquals(it.value.toString(), args[it.key].toString())
}

assertEquals(
":broken_heart: *<http://jenkins.localhost:8080/job/elastic+kibana+master/1/|elastic / kibana # master #1>*" +
"\n\nFirst attempt at sending this notification failed. Please check the build.",
args.blocks[0].text.text.toString()
)
}

@Test
void 'getTestFailures() should truncate list of failures to 10'() {
prop('testUtils', [
getFailures: {
return (1..12).collect {
return [
url: Mocks.TEST_FAILURE_URL,
fullDisplayName: "Failure #${it}",
]
}
},
])

def message = (String) slackNotifications.getTestFailures()

assertTrue("Message ends with truncated indicator", message.endsWith("...and 2 more"))
assertTrue("Message contains Failure #10", message.contains("Failure #10"))
assertTrue("Message does not contain Failure #11", !message.contains("Failure #11"))
}

@Test
void 'shortenMessage() should truncate a long message, but leave parts that fit'() {
assertEquals('Hello\nHello\n[...truncated...]', slackNotifications.shortenMessage('Hello\nHello\nthis is a long string', 29))
}

@Test
void 'shortenMessage() should not modify a short message'() {
assertEquals('Hello world', slackNotifications.shortenMessage('Hello world', 11))
}

@Test
void 'shortenMessage() should truncate an entire message with only one part'() {
assertEquals('[...truncated...]', slackNotifications.shortenMessage('Hello world this is a really long message', 40))
}
}
2 changes: 1 addition & 1 deletion Jenkinsfile
@@ -44,7 +44,7 @@ kibanaPipeline(timeoutMinutes: 155, checkPrChanges: true, setCommitStatus: true)
'xpack-savedObjectsFieldMetrics': kibanaPipeline.functionalTestProcess('xpack-savedObjectsFieldMetrics', './test/scripts/jenkins_xpack_saved_objects_field_metrics.sh'),
// 'xpack-pageLoadMetrics': kibanaPipeline.functionalTestProcess('xpack-pageLoadMetrics', './test/scripts/jenkins_xpack_page_load_metrics.sh'),
'xpack-securitySolutionCypress': { processNumber ->
whenChanged(['x-pack/plugins/security_solution/', 'x-pack/test/security_solution_cypress/']) {
whenChanged(['x-pack/plugins/security_solution/', 'x-pack/test/security_solution_cypress/', 'x-pack/plugins/triggers_actions_ui/public/application/sections/action_connector_form/', 'x-pack/plugins/triggers_actions_ui/public/application/context/actions_connectors_context.tsx']) {
kibanaPipeline.functionalTestProcess('xpack-securitySolutionCypress', './test/scripts/jenkins_security_solution_cypress.sh')(processNumber)
}
},
17 changes: 15 additions & 2 deletions docs/CHANGELOG.asciidoc
@@ -42,10 +42,23 @@ This section summarizes the changes in each release.
[[release-notes-7.8.1]]
== {kib} 7.8.1

coming::[7.8.1]

See also <<breaking-changes-7.8,breaking changes in 7.8>>.

[float]
[[security-update-7.8.1]]
=== Security updates
* In {kib} versions before 7.8.1, there is a denial of service (DoS) flaw in Timelion. Attackers can construct a URL that, when viewed
by a {kib} user, causes the {kib} process to consume large amounts of CPU and become unresponsive,
https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-7016[CVE-2020-7016].
+
You must upgrade to 7.8.1. If you are unable to upgrade, set `timelion.enabled` to `false` in your kibana.yml file to disable Timelion.

* In {kib} versions before 7.8.1, region map visualizations contain a stored XSS flaw. Attackers who can edit or create region map visualizations can obtain sensitive information
or perform destructive actions on behalf of {kib} users who view the region map visualization,
https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-7017[CVE-2020-7017].
+
You must upgrade to 7.8.1. If you are unable to upgrade, set `xpack.maps.enabled`, `region_map.enabled`, and `tile_map.enabled` to `false` in kibana.yml to disable map visualizations. Both workarounds appear together in the kibana.yml sketch after this list.

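If you cannot upgrade and need the workarounds instead, the settings named in the two entries above can be combined in `kibana.yml`. The following is a minimal, illustrative sketch that uses only the setting names listed above; remove these lines once you upgrade to 7.8.1.

[source,yaml]
----
# Workarounds for CVE-2020-7016 and CVE-2020-7017, only if you cannot upgrade to 7.8.1
timelion.enabled: false     # disables Timelion
xpack.maps.enabled: false   # disables the Maps app
region_map.enabled: false   # disables region map visualizations
tile_map.enabled: false     # disables coordinate (tile) map visualizations
----
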
[float]
[[bug-v7.8.1]]
=== Bug fixes
18 changes: 0 additions & 18 deletions docs/apm/error-reports-watcher.asciidoc

This file was deleted.

3 changes: 0 additions & 3 deletions docs/apm/how-to-guides.asciidoc
@@ -8,7 +8,6 @@ Learn how to perform common APM app tasks.
* <<agent-configuration>>
* <<apm-alerts>>
* <<custom-links>>
* <<errors-alerts-with-watcher>>
* <<filters>>
* <<machine-learning-integration>>
* <<advanced-queries>>
@@ -21,8 +20,6 @@ include::apm-alerts.asciidoc[]

include::custom-links.asciidoc[]

include::error-reports-watcher.asciidoc[]

include::filters.asciidoc[]

include::machine-learning.asciidoc[]
4 changes: 2 additions & 2 deletions docs/developer/architecture/code-exploration.asciidoc
@@ -526,9 +526,9 @@ WARNING: Missing README.
See Configuring security in Kibana.
- {kib-repo}blob/{branch}/x-pack/plugins/security_solution[securitySolution]
- {kib-repo}blob/{branch}/x-pack/plugins/security_solution/README.md[securitySolution]
WARNING: Missing README.
Welcome to the Kibana Security Solution plugin! This README will go over getting started with development and testing.
- {kib-repo}blob/{branch}/x-pack/plugins/snapshot_restore/README.md[snapshotRestore]
@@ -19,5 +19,6 @@ export interface RouteConfigOptions<Method extends RouteMethod>
| [authRequired](./kibana-plugin-core-server.routeconfigoptions.authrequired.md) | <code>boolean &#124; 'optional'</code> | Defines authentication mode for a route: - true. A user has to have valid credentials to access a resource - false. A user can access a resource without any credentials. - 'optional'. A user can access a resource if has valid credentials or no credentials at all. Can be useful when we grant access to a resource but want to identify a user if possible.<!-- -->Defaults to <code>true</code> if an auth mechanism is registered. |
| [body](./kibana-plugin-core-server.routeconfigoptions.body.md) | <code>Method extends 'get' &#124; 'options' ? undefined : RouteConfigOptionsBody</code> | Additional body options [RouteConfigOptionsBody](./kibana-plugin-core-server.routeconfigoptionsbody.md)<!-- -->. |
| [tags](./kibana-plugin-core-server.routeconfigoptions.tags.md) | <code>readonly string[]</code> | Additional metadata tag strings to attach to the route. |
| [timeout](./kibana-plugin-core-server.routeconfigoptions.timeout.md) | <code>number</code> | Timeouts for processing durations. Response timeout is in milliseconds. Default value: 2 minutes |
| [xsrfRequired](./kibana-plugin-core-server.routeconfigoptions.xsrfrequired.md) | <code>Method extends 'get' ? never : boolean</code> | Defines xsrf protection requirements for a route: - true. Requires an incoming POST/PUT/DELETE request to contain <code>kbn-xsrf</code> header. - false. Disables xsrf protection.<!-- -->Set to true by default |

@@ -0,0 +1,13 @@
<!-- Do not edit this file. It is automatically generated by API Documenter. -->

[Home](./index.md) &gt; [kibana-plugin-core-server](./kibana-plugin-core-server.md) &gt; [RouteConfigOptions](./kibana-plugin-core-server.routeconfigoptions.md) &gt; [timeout](./kibana-plugin-core-server.routeconfigoptions.timeout.md)

## RouteConfigOptions.timeout property

Timeouts for processing durations. Response timeout is in milliseconds. Default value: 2 minutes

<b>Signature:</b>

```typescript
timeout?: number;
```
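To show how the new `timeout` option sits alongside the other `RouteConfigOptions` fields documented above, here is a minimal, hypothetical sketch of a route options object. The import path, the generic method parameter, and the values are assumptions for illustration; only the option names and their documented types come from the generated docs above.

```typescript
import type { RouteConfigOptions } from 'src/core/server';

// Hypothetical options for a POST route; values are illustrative only.
const options: RouteConfigOptions<'post'> = {
  authRequired: true,       // the caller must present valid credentials
  xsrfRequired: true,       // non-GET requests must send the kbn-xsrf header
  tags: ['access:myPlugin'],
  timeout: 5 * 60 * 1000,   // response timeout in milliseconds (default is 2 minutes)
};
```

Such an object would typically be passed as the `options` field of a route config when registering a handler on a plugin's router; that wiring is outside what this page documents.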
Binary file modified docs/images/intro-data-tutorial.png
Binary file added docs/images/intro-help-icon.png
Binary file modified docs/images/intro-kibana.png
Binary file modified docs/images/intro-management.png
Binary file modified docs/images/lens_data_info.png
Binary file modified docs/images/lens_drag_drop.gif
Binary file modified docs/images/lens_suggestions.gif
Binary file modified docs/images/lens_tutorial_1.png
Binary file modified docs/images/lens_tutorial_2.png
Binary file modified docs/images/lens_tutorial_3.png
Binary file modified docs/images/lens_viz_types.png
11 changes: 6 additions & 5 deletions docs/management/alerting/alerts-and-actions-intro.asciidoc
@@ -4,22 +4,23 @@

beta[]

The *Alerts and Actions* UI lets you <<alert-management, see and control all the alerts>> in a space, and provides tools to <<connector-management, create and manage connectors>> so that alerts can trigger actions like notification, indexing, and ticketing.
The *Alerts and Actions* UI lets you <<alert-management, see and control all the alerts>> in a space, and provides tools to <<connector-management, create and manage connectors>> so that alerts can trigger actions like notification, indexing, and ticketing.

To manage alerting and connectors, open the menu, then go to *Stack Management > {kib} > Alerts and Actions*.
To manage alerting and connectors, open the menu,
then go to *Stack Management > Alerts and Insights > Alerts and Actions*.

[role="screenshot"]
image:management/alerting/images/alerts-and-actions-ui.png[Example alert listing in the Alerts and Actions UI]

[NOTE]
============================================================================
Similar to dashboards, alerts and connectors reside in a <<xpack-spaces, space>>.
The *Alerts and Actions* UI only shows alerts and connectors for the current space.
The *Alerts and Actions* UI only shows alerts and connectors for the current space.
============================================================================

[NOTE]
============================================================================
{es} also offers alerting capabilities through Watcher, which
can be managed through the <<watcher-ui, Watcher UI>>. See
can be managed through the <<watcher-ui, Watcher UI>>. See
<<alerting-concepts-differences>> for more information.
============================================================================
============================================================================
@@ -2,17 +2,16 @@
[[adding-policy-to-index]]
=== Adding a policy to an index

To add a lifecycle policy to an index and view the status for indices
managed by a policy, open the menu, then go to *Stack Management > {es} > Index Management*. This page lists your
{es} indices, which you can filter by lifecycle status and lifecycle phase.
To add a lifecycle policy to an index and view the status for indices
managed by a policy, open the menu, then go to *Stack Management > Data > Index Management*.
This page lists your
{es} indices, which you can filter by lifecycle status and lifecycle phase.

To add a policy, select the index name and then select *Manage Index > Add lifecycle policy*.
You’ll see the policy name, the phase the index is in, the current
action, and if any errors occurred performing that action.
You’ll see the policy name, the phase the index is in, the current
action, and if any errors occurred performing that action.

To remove a policy from an index, select *Manage Index > Remove lifecycle policy*.

[role="screenshot"]
image::images/index_management_add_policy.png[][UI for adding a policy to an index]


@@ -3,7 +3,7 @@
[[example-using-index-lifecycle-policy]]
=== Tutorial: Use {ilm-init} to manage {filebeat} time-based indices

With {ilm} ({ilm-init}), you can create policies that perform actions automatically
With {ilm} ({ilm-init}), you can create policies that perform actions automatically
on indices as they age and grow. {ilm-init} policies help you to manage
performance, resilience, and retention of your data during its lifecycle. This tutorial shows
you how to use {kib}’s *Index Lifecycle Policies* to modify and create {ilm-init}
@@ -59,7 +59,7 @@ output as described in {filebeat-ref}/filebeat-getting-started.html[Getting Star
{filebeat} includes a default {ilm-init} policy that enables rollover. {ilm-init}
is enabled automatically if you’re using the default `filebeat.yml` and index template.

To view the default policy in {kib}, open the menu, go to * Stack Management > {es} > Index Lifecycle Policies*,
To view the default policy in {kib}, open the menu, go to *Stack Management > Data > Index Lifecycle Policies*,
search for _filebeat_, and choose the _filebeat-version_ policy.

This policy initiates the rollover action when the index size reaches 50GB or
Expand Down Expand Up @@ -114,7 +114,7 @@ If meeting a specific retention time period is most important, you can create a
custom policy. For this option, you will use {filebeat} daily indices without
rollover.

. To create a custom policy, open the menu, go to *Stack Management > {es} > Index Lifecycle Policies*, then click
. To create a custom policy, open the menu, go to *Stack Management > Data > Index Lifecycle Policies*, then click
*Create policy*.

. Activate the warm phase and configure it as follows:
@@ -156,7 +156,7 @@ image::images/tutorial-ilm-custom-policy.png["Modify the custom policy to add a
[role="screenshot"]
image::images/tutorial-ilm-delete-phase-creation.png["Delete phase"]

. To configure the index to use the new policy, open the menu, then go to *Stack Management > {es} > Index Lifecycle
. To configure the index to use the new policy, open the menu, then go to *Stack Management > Data > Index Lifecycle
Policies*.

.. Find your {ilm-init} policy.
4 changes: 2 additions & 2 deletions docs/management/ingest-pipelines/ingest-pipelines.asciidoc
@@ -7,7 +7,7 @@ pipelines that perform common transformations and
enrichments on your data. For example, you might remove a field,
rename an existing field, or set a new field.

You’ll find *Ingest Node Pipelines* in *Management > Elasticsearch*. With this feature, you can:
You’ll find *Ingest Node Pipelines* in *Stack Management > Ingest*. With this feature, you can:

* View a list of your pipelines and drill down into details.
* Create a pipeline that defines a series of tasks, known as processors.
@@ -23,7 +23,7 @@ image:management/ingest-pipelines/images/ingest-pipeline-list.png["Ingest node p
The minimum required permissions to access *Ingest Node Pipelines* are
the `manage_pipeline` and `cluster:monitor/nodes/info` cluster privileges.

You can add these privileges in *Management > Security > Roles*.
You can add these privileges in *Stack Management > Security > Roles*.

[role="screenshot"]
image:management/ingest-pipelines/images/ingest-pipeline-privileges.png["Privileges required for Ingest Node Pipelines"]
7 changes: 4 additions & 3 deletions docs/management/managing-beats.asciidoc
@@ -4,7 +4,8 @@

include::{asciidoc-dir}/../../shared/discontinued.asciidoc[tag=cm-discontinued]

To use the Central Management UI, open the menu, go to *Stack Management > {beats} > Central Management*, then define and
To use the {beats} Central Management UI, open the menu, go to *Stack Management > Ingest >
{beats} Central Management*, then define and
manage configurations in a central location in {kib} and quickly deploy
configuration changes to all {beats} running across your enterprise. For more
about central management, see the related {beats} documentation:
@@ -17,8 +18,8 @@ about central management, see the related {beats} documentation:
This feature requires an Elastic license that includes {beats} central
management.
Don't have a license? You can start a 30-day trial. Open the menu, go to
*Stack Management > Elasticsearch > License Management*. At the end of the trial
Don't have a license? You can start a 30-day trial. Open the menu,
go to *Stack Management > Stack > License Management*. At the end of the trial
period, you can purchase a subscription to keep using central management. For
more information, see https://www.elastic.co/subscriptions and
<<managing-licenses>>.
2 changes: 1 addition & 1 deletion docs/management/managing-ccr.asciidoc
@@ -7,7 +7,7 @@ remote clusters on a local cluster. {ref}/xpack-ccr.html[Cross-cluster replicati
is commonly used to provide remote backups for disaster recovery and for
geo-proximity copies of data.

To get started, open the menu, then go to *Stack Management > Elasticsearch > Cross-Cluster Replication*.
To get started, open the menu, then go to *Stack Management > Data > Cross-Cluster Replication*.

[role="screenshot"]
image::images/cross-cluster-replication-list-view.png[][Cross-cluster replication list view]